Example #1
def __init__(self, mslist, mss=None):
    """
    mslist is the filename of a text file listing one MS per line;
    alternatively, pass the list of MS names directly via mss.
    """
    import os
    import pyrap.tables as pt
    if mss is not None:
        self.mss = mss
        self.mslist = None
    else:
        self.mslist = mslist
        self.mss = [s.strip() for s in open(mslist).readlines()]
    self.obsids = [os.path.basename(ms).split('_')[0] for ms in self.mss]
    self.freqs = []
    self.channels = []
    self.hascorrected = []
    self.dysco = []
    for ms in self.mss:
        t = pt.table(ms, readonly=True, ack=False)
        # check whether a CORRECTED_DATA column is present
        colname = 'CORRECTED_DATA'
        try:
            dummy = t.getcoldesc(colname)
        except RuntimeError:
            dummy = None
        self.hascorrected.append(dummy is not None)
        # Dysco-compressed columns show up in the table structure
        self.dysco.append('Dysco' in t.showstructure())
        t.close()
        t = pt.table(ms + '/SPECTRAL_WINDOW', readonly=True, ack=False)
        self.freqs.append(t[0]['REF_FREQUENCY'])
        self.channels.append(t[0]['CHAN_FREQ'])
        t.close()
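A minimal usage sketch, assuming this __init__ belongs to a small helper class (called MSList here purely for illustration) and that mslist.txt lists one MS path per line:

bands = MSList('mslist.txt')    # MSList is a hypothetical name for the enclosing class
print bands.obsids              # observation IDs parsed from the MS filenames
print bands.freqs               # REF_FREQUENCY of each MS
print bands.hascorrected        # True where a CORRECTED_DATA column is present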
Example #2
def updateMSmetadata(msfile):
    # Update history to show that this script has modified original data
    # (pt = pyrap.tables, quantity = pyrap.quanta.quantity; 'options' and
    # '__version__' are assumed to be defined at script level)
    tms = pt.table(msfile, readonly=False, ack=False)
    th = pt.table(tms.getkeyword('HISTORY'), readonly=False, ack=False)
    nr = th.nrows()
    th.addrows(1)
    tr = th.row()
    tr.put(nr, {'TIME': quantity('today').get('s').get_value(),
                'OBSERVATION_ID': 0,
                'MESSAGE': 'Applied polarization modifications',
                'PRIORITY': 'NORMAL',
                'ORIGIN': '%s: version = %s' % (__file__, __version__),
                'OBJECT_ID': 0,
                'APPLICATION': __file__,
                'CLI_COMMAND': sys.argv,
                'APP_PARAMS': ['']})

    if not options.linear:
        # Change metadata information to describe circular feeds
        feed = pt.table(tms.getkeyword('FEED'), readonly=False, ack=False)
        for tpart in feed.iter('ANTENNA_ID'):
            tpart.putcell('POLARIZATION_TYPE', 0, ['R', 'L'])

        polariz = pt.table(tms.getkeyword('POLARIZATION'), readonly=False, ack=False)
        polariz.putcell('CORR_TYPE', 0, [5, 6, 7, 8])  # 5,6,7,8 = RR,RL,LR,LL
    # close the main table whether or not the feed metadata was changed
    tms.close()
Example #3
    def __init__(self, ms):
        self.timepara = {'start': 0, 'end': 0, 'step': 0, 'cent': 0}
        self.freqpara = {'start': 0, 'end': 0, 'step': 0, 'cent': 0}
        self.msname = ms
        if not os.path.isdir(ms):
            sys.exit('INPUT MS DOES NOT EXIST!')
        # Get the time parameters first
        t = pt.table(ms, readonly=True, ack=False)
        t1 = t.sort('unique desc TIME')
        self.timepara['step'] = t.getcell('EXPOSURE', 0)
        self.timepara['start'] = np.min(t.getcol('TIME')) - self.timepara['step'] / 2.
        self.timepara['end'] = np.max(t.getcol('TIME')) + self.timepara['step'] / 2.
        self.timepara['cent'] = self.timepara['start'] + (self.timepara['end'] - self.timepara['start']) / 2.
        self.mstimevalues = t1.getcol('TIME')[::-1]
        t1.close()
        # Get the frequency parameters
        freq = pt.table(t.getkeyword("SPECTRAL_WINDOW"), readonly=True, ack=False)
        self.fullband = freq.getcell('TOTAL_BANDWIDTH', 0)
        self.freqpara['cent'] = freq.getcell('REF_FREQUENCY', 0)
        self.freqpara['step'] = freq.getcell('CHAN_WIDTH', 0)[0]
        self.msfreqvalues = freq.getcell('CHAN_FREQ', 0)
        self.freqpara['start'] = self.msfreqvalues[0] - self.freqpara['step'] / 2.
        self.freqpara['end'] = self.msfreqvalues[-1] + self.freqpara['step'] / 2.
        freq.close()
        # Get the station names
        antennas = pt.table(t.getkeyword("ANTENNA"), readonly=True, ack=False)
        self.stations = antennas.getcol('NAME')
        antennas.close()
        t.close()
Example #4
def __init__(self, MSfile, timecorr, block, solint, ionfactor, ncores,
             resume, parset, skymodel, parmdb, clobber, solver):
    self.file = MSfile
    self.msname = self.file.split('/')[-1]
    sw = pt.table(self.file + '/SPECTRAL_WINDOW', ack=False)
    self.freq = sw.col('REF_FREQUENCY')[0]
    sw.close()
    obs = pt.table(self.file + '/FIELD', ack=False)
    self.ra = np.degrees(float(obs.col('REFERENCE_DIR')[0][0][0]))
    if self.ra < 0.:
        self.ra = 360. + self.ra
    self.dec = np.degrees(float(obs.col('REFERENCE_DIR')[0][0][1]))
    obs.close()
    ant = pt.table(self.file + '/ANTENNA', ack=False)
    diam = float(ant.col('DISH_DIAMETER')[0])
    ant.close()
    # primary-beam FWHM in degrees for a station of this diameter
    self.fwhm_deg = 1.1 * ((3.0e8 / self.freq) / diam) * 180. / np.pi
    self.name = str(self.freq)
    self.timecorr = timecorr
    self.sol_block = block
    self.ionfactor = ionfactor
    self.ncores = ncores
    self.resume = resume
    self.solint = solint
    self.parset = parset
    self.input_parmdb = parmdb
    self.output_parmdb = 'instrument'
    self.skymodel = skymodel
    self.clobber = clobber
    self.solver = solver
Example #5
    def _get_ra_and_decl_from_ms(self, measurement_set):
        """
        This function uses pyrap to read the ra and declination from a
        measurement set (used by expected_fluxes_in_fov). This is a position
        in the sky. The values are stored in field.phase_dir in the first
        row. All exceptions thrown are caught, logged, and re-raised.
        """
        table = None
        field = None
        ra_and_decl = None

        try:
            # open the ms, get the phase direction
            table = pt.table(measurement_set)
            field = pt.table(table.getkeyword("FIELD"))
            ra_and_decl = field.getcell("PHASE_DIR", 0)[0]

        except Exception as exception:
            # catch all exceptions and log
            self.logger.error("Error loading FIELD/PHASE_DIR from "
                              "measurementset {0} : {1}".format(measurement_set,
                                                                str(exception)))
            raise

        return ra_and_decl
Example #6
def copy_column_to_bands(mslist, ms_from, inputcol, outputcol):
    """
    Copies one column from an MS file to multiple MS files (bands)

    Parameters
    ----------
    mslist : list
        MS files receiving the copy
    ms_from : str
        MS file to copy from
    inputcol : str
        Column name to copy from
    outputcol : str
        Column name to copy to

    """
    datain = pt.table(ms_from)
    data = datain.getcol(inputcol, nrow=1)
    numberofchans = int(numpy.shape(data)[1])
    # integer division: each band MS receives an equal share of the channels
    chanperms = numberofchans // len(mslist)

    for ms_id, ms in enumerate(mslist):
        if os.path.isdir(ms):
            # blc/trc select this band's channel range and all 4 correlations
            data = datain.getcolslice(inputcol, [chanperms * ms_id, 0],
                                      [(chanperms * (ms_id + 1)) - 1, 3])
            dataout = pt.table(ms, readonly=False)
            dataout.putcol(outputcol, data)
            dataout.flush()
            dataout.close()
    datain.close()
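A short usage sketch (the MS names are placeholders), assuming the band MSs together cover the channels of the full-bandwidth MS so that the channel count divides evenly:

import glob

bands = sorted(glob.glob('L123456_SB*.MS'))      # hypothetical band MSs
copy_column_to_bands(bands, 'L123456_full.MS',   # hypothetical MS to copy from
                     'MODEL_DATA', 'MODEL_DATA')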
Example #7
def get_summary(self):
    subtables = self.ms.keywordnames()
    for subtable in ('POLARIZATION', 'OBSERVATION', 'FIELD',
                     'SPECTRAL_WINDOW'):
        if subtable not in subtables:
            sys.stderr.write("Subtable %s missing from MS\n" % subtable)
            sys.exit()
    frequencies = self.get_frequencies()
    self.get_antenntas()
    polarization = {'count': pt.table(
        os.path.join(self.filename, 'POLARIZATION'), ack=False).getcol(
        'NUM_CORR')[0]}
    times = {'time': pt.table(
        os.path.join(self.filename, 'OBSERVATION'), ack=False).getcol(
        'TIME_RANGE')[0]}
    fieldnames = {'fieldnames': pt.table(
        os.path.join(self.filename, 'FIELD'), ack=False).getcol('NAME')}
    phases = {'direction': pt.table(
        os.path.join(self.filename, 'FIELD'), ack=False).getcol('PHASE_DIR')}
    self.summary = {
        'frequencies': frequencies,
        'polarization': polarization,
        'fieldnames': fieldnames,
        'times': times,
        'phases': phases
    }
Example #8
def load_and_compare_data_sets(ms1, ms2):
    # open the two datasets
    ms1 = pt.table(ms1)
    ms2 = pt.table(ms2)

    # get the number of rows in the dataset
    n_row = len(ms1.getcol('DATA'))
    n_complex_vis = 4

    # create a target array with the same length as the data column
    div_array = numpy.zeros((n_row, 1, n_complex_vis), dtype=numpy.complex64)
    ms1_array = ms1.getcol('DATA')
    ms2_array = ms2.getcol('CORRECTED_DATA')

    div_max = 0
    for idx in xrange(n_row):
        for idy in xrange(n_complex_vis):
            div_value = ms1_array[idx][0][idy] - ms2_array[idx][0][idy]
            if numpy.abs(div_value) > numpy.abs(div_max):
                div_max = div_value
            div_array[idx][0][idy] = div_value
    print "maximum difference between the measurement sets: {0}".format(div_max)
    # Use a delta of about float precision
    if numpy.abs(div_max) > 1e-6:
        print "The measurement sets contain different values"
        print "failed delta test!"
        return False

    return True
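A usage sketch (paths are placeholders); the function assumes both MSs have identical row ordering and a single channel, and compares DATA in the first against CORRECTED_DATA in the second:

if load_and_compare_data_sets('run_a.MS', 'run_b.MS'):
    print "measurement sets agree to about float precision"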
Example #9
def gain2matlab(msname='test1.MS', gainfilename='bbsgain.mat', timeslot=0, instrumentname='instrument'):
    parmdb = msname + '/' + instrumentname
    antenna = msname + '/ANTENNA'

    valstable = table(parmdb, ack=False)
    namestable = table(parmdb + "::NAMES", ack=False)
    antennatable = table(antenna, ack=False)

    vals = valstable.col('VALUES')
    names = namestable.col('NAME')
    antennas = antennatable.col('NAME')
    antennamap = {}
    for i in range(antennas.nrows()):
        antennamap[antennas[i]] = i

    g = np.zeros((len(antennamap) * 2, 2), dtype=complex)

    for i in range(vals.nrows()):
        # parameter names look like 'Gain:<xcor>:<ycor>:<Real|Imag|Phase>:<antenna>'
        (bla, xcor, ycor, reim, ant) = names[i].split(':')
        antnr = antennamap[ant]
        (xcor, ycor) = (int(xcor), int(ycor))
        if reim == "Real":
            val = vals[i][timeslot][0]
        elif reim == "Imag":
            val = vals[i][timeslot][0] * 1.j
        elif reim == "Phase":
            val = cmath.rect(1, vals[i][timeslot][0])
        g[antnr * 2 + ycor][xcor] += val.conjugate()

    scipy.io.savemat(gainfilename, dict(g=g), oned_as='row')
    print "Stored timeslot", timeslot, "gains from", msname, "/", instrumentname, "as", gainfilename
Example #10
def splitdataset(dataset, interval, out):
    name = dataset.split("/")[-1]
    print "Splitting {0} by {1} sec intervals...".format(name, interval)
    t = pt.table(dataset, ack=False)
    starttime = t[0]["TIME"]
    endtime = t[t.nrows() - 1]["TIME"]
    numberofsplits = int((endtime - starttime) / interval)
    for split in range(0, numberofsplits):
        outputname = os.path.join(out, "splitMS", name + ".{0}sec_{1:04d}.split".format(int(interval), split + 1))
        if split == 0:
            # pad the first interval so the opening timestamp is included
            thisstart = starttime - 2.0
        else:
            thisstart = starttime + (float(split) * interval)
        thisend = starttime + ((float(split) + 1) * interval)
        t1 = t.query("TIME > " + str(thisstart) + " && TIME < " + str(thisend),
                     sortlist="TIME,ANTENNA1,ANTENNA2")
        t1.copy(outputname, True)
        t1.close()
        if split == 0:
            thisstart += 2.0
        t1 = pt.table(outputname + "/OBSERVATION", ack=False, readonly=False)
        thistimerange = np.array([thisstart, thisend])
        t1.putcell("TIME_RANGE", 0, thistimerange)
        t1.putcell("LOFAR_OBSERVATION_START", 0, thisstart)
        t1.putcell("LOFAR_OBSERVATION_END", 0, thisend)
        t1.close()
    t.close()
Example #11
def read_ms(infile, verbosity=1):
    """ Convert MS to a HDF file
    :param infile:  Measurement Set path
    :return: HDU version of Measurement Set
    """
    pp = PrintLog(verbosity=verbosity)
    ms = pt.table(infile)

    # Create a HDU List for storing HDUs
    hdul = IdiHdulist(verbosity=verbosity)

    # Add each column to the main HDU
    hdu_main = table2hdu(ms, "MAIN", verbosity=verbosity, close_after=False)
    hdul["MAIN"] = hdu_main

    # Now look for other keyword tables
    for key, val in ms.getkeywords().items():
        pp.debug(val)
        if type(val) in (unicode, str):
            if val.startswith("Table: "):
                tblpath = val.strip().split("Table: ")[1]
                pp.h2("Opening %s" % key)
                t = pt.table(tblpath)
                t_hdu = table2hdu(t, key, verbosity=verbosity)
                hdul[key] = t_hdu
        else:
            hdul["MAIN"].header.vals[key] = val

    ms.close()
    return hdul
Example #12
def main(inms='', beVerbose=False):
    didWarn = False
    whatSkipped = {}
    t = pt.table(inms, ack=False)
    th = pt.table(t.getkeyword('HISTORY'), ack=False)
    colnames = th.colnames()
    nrows = th.nrows()
    print 'The HISTORY table in %s has %d rows' % (inms, nrows)
    for row in th:
        if row['APPLICATION'] in ('imager', 'OLAP', 'ms'):
            if beVerbose:
                print '%s was run at time %f with parameters:' % (row['APPLICATION'], row['TIME'])
                for r in row['APP_PARAMS']:
                    print '\t%s' % (r)
            else:
                if not didWarn:
                    print '(Skipping OLAP, imager, and ms rows, use -v to print them)'
                    didWarn = True
                if row['APPLICATION'] in whatSkipped:
                    whatSkipped[row['APPLICATION']] += 1
                else:
                    whatSkipped[row['APPLICATION']] = 1
        else:
            print '%s was run at time %f with parameters:' % (row['APPLICATION'], row['TIME'])
            for r in row['APP_PARAMS']:
                print '\t%s' % (r)
    print 'Overview of skipped rows:'
    for key in whatSkipped:
        print '\t%s:\tskipped %d times' % (key, whatSkipped[key])
Example #13
def copy_column_to_ms(ms, inputcol, outputcol, ms_from=None):
    """
    Copies one column to another, within an MS file or between two MS files

    Parameters
    ----------
    ms : str
        MS file receiving copy
    inputcol : str
        Column name to copy from
    outputcol : str
        Column name to copy to
    ms_from : str, optional
        MS file to copy from. If None, the column is copied internally

    """
    t = pt.table(ms, readonly=False, ack=False)
    if ms_from is not None:
        # the source MS is only read from, so open it read-only and close it
        tf = pt.table(ms_from, readonly=True, ack=False)
        data = tf.getcol(inputcol)
        desc = tf.getcoldesc(inputcol)
        tf.close()
    else:
        data = t.getcol(inputcol)
        desc = t.getcoldesc(inputcol)

    # Add the output column if needed
    if outputcol not in t.colnames():
        desc['name'] = outputcol
        t.addcols(desc)

    t.putcol(outputcol, data)
    t.flush()
    t.close()
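A usage sketch with placeholder paths: copy CORRECTED_DATA onto DATA within one MS, or pull MODEL_DATA across from a second MS:

copy_column_to_ms('target.MS', 'CORRECTED_DATA', 'DATA')
copy_column_to_ms('target.MS', 'MODEL_DATA', 'MODEL_DATA', ms_from='sim.MS')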
Example #14
def rename2(SB, obsid):
    SBtable = pt.table("{0}/OBSERVATION".format(SB), ack=False)
    beam = int(SBtable.col("LOFAR_SUB_ARRAY_POINTING")[0])
    SBtable.close()
    SBtable = pt.table("{0}/SPECTRAL_WINDOW".format(SB), ack=False)
    sbno = int(SBtable.col("NAME")[0].split("-")[-1])
    SBtable.close()
    newname = "{0}_SAP{1:03d}_SB{2:03d}_uv.MS.dppp".format(obsid, beam, sbno)
    return newname
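A call sketch with placeholder inputs; the returned name depends on the sub-array pointing and spectral-window name stored in the MS:

newname = rename2('SB123_uv.MS', 'L123456')   # hypothetical subband MS and obsid
# e.g. 'L123456_SAP000_SB123_uv.MS.dppp'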
Example #15
def ms(msname="$MS", subtable=None, write=False):
  """Opens the MS or a subtable (read-only by default), returns table object."""
  msname = interpolate_locals("msname")
  if not msname:
    raise ValueError("'msname' or global MS variable must be set")
  if subtable:
    msname = table(msname, ack=False).getkeyword(subtable)
  tab = table(msname, readonly=not write, ack=False)
  return tab
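A usage sketch, assuming the Pyxis-style interpolate_locals and a global MS variable are available (as the "$MS" default implies):

spw = ms(subtable='SPECTRAL_WINDOW')   # read-only SPECTRAL_WINDOW of the global MS
main = ms('my.MS', write=True)         # hypothetical path, opened writable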
Example #16
def read_ms(logger, msname, ateam, diameter=None):
    def get_station_diameter(table):
        histable = pt.table(table.getkeyword('HISTORY'), ack=False)
        antenna_set = None
        for line in histable.getcell('APP_PARAMS', 0):
            try:
                key, value = line.split("=")
            except ValueError:
                continue
            if key == "Observation.antennaSet":
                antenna_set = value
                break
        if antenna_set == "LBA_INNER":
            logger.debug("LBA_INNER mode")
            return STATION_DIAMETER["LBA_INNER"]
        elif antenna_set and antenna_set[:3] == "LBA":
            logger.debug("LBA_(OUTER,SPARSE,X,Y) mode")
            return STATION_DIAMETER["LBA"]
        elif antenna_set and antenna_set[:3] == "HBA":
            logger.debug("HBA mode")
            return STATION_DIAMETER["HBA"]
        else:
            logger.error("Failed to identify antenna set")

    def field_size_ateam(table):
        logger.debug('Computing field size for A-team')
        fieldtable = table.getkeyword('FIELD').split()[1]
        taqloutput = pt.taql("calc from %s calc max(angdist (DELAY_DIR[0,], [%s]))" % (fieldtable, ", ".join(",".join(src) for src in ATEAM)))
        return taqloutput[0]

    def field_size_nominal(table, wavelength, diameter):
        if not diameter:
            diameter = get_station_diameter(table)
        logger.debug("Station diameter %f m" % diameter)
        return 1.22*wavelength/diameter

    t = pt.table(msname, readonly=True, ack=False)
    interval = t.getcell('INTERVAL', 0)
    swtable = t.getkeyword('SPECTRAL_WINDOW')
    tsw = pt.table(swtable, readonly=True, ack=False)
    freq = tsw.getcell('REF_FREQUENCY', 0)
    wavelength = 299792458./freq
    maxbl = pt.taql("calc sqrt(max([select sumsqr(UVW[0:1]) from %s]))" % msname)[0] / wavelength
    chwidth = tsw.getcell('CHAN_WIDTH', 0)[0]

    if ateam:
        fieldsize = field_size_ateam(t)
    else:
        fieldsize = field_size_nominal(t, wavelength, diameter)

    logger.debug('Frequency is %f MHz'%(freq/1.e6))
    logger.debug('Wavelength is %f m'%(wavelength))
    logger.debug('Maximum baseline length is %f m = %f lambdas'%(maxbl*wavelength,maxbl))
    logger.debug('Integration time is %f sec'%(interval))
    logger.debug('Channel width is %f Hz'%(chwidth))
    logger.debug('Field size is %f degrees'%(fieldsize*180./3.14159))

    return fieldsize, maxbl, freq, interval, chwidth
Example #17
    def compareColumn(self, columnname, taql=False):
        if self.verbose:
            print "Comparing " + bcolors.OKBLUE + columnname + bcolors.ENDC + " columns."

        passed = True
        errorcount = 0                          # counts rows whose columns differ

        if taql == False:                       # if TaQL is not to be used, compare with numpy
            if self.debug:
                print "compareColumn() using numpy"

            reftab = pt.table(self.MS)          # open reference MS in read-only mode
            testtab = pt.table(self.test_MS)    # open test MS in read-only mode

            tc_ref = reftab.col(columnname)     # get column in reference table
            tc_test = testtab.col(columnname)   # get column in test table

            nrows = testtab.nrows()
            for i in progressbar(range(nrows), "comparing " + columnname + " ", 60):
                # numpy can subtract the cell arrays element-wise
                difference = numpy.max(abs(tc_test[i] - tc_ref[i]))
                if difference > self.acceptancelimit:   # this row fails the test
                    passed = False
                    errorcount += 1

            reftab.close()
            testtab.close()
        else:
            if self.debug:
                print "compareColumn() using TaQL"

            self.addRefColumnToTesttab(columnname)   # add reference column as a forward column

            testcolumnname = "test_" + columnname    # name used in test_MS for the forwarded column

            # Select rows where the real or imaginary part differs by more than
            # the acceptance limit; the number of matches is the error count.
            taqlcmd = "SELECT * FROM '" + self.test_MS + "' WHERE !all(NEARABS(Real(" + columnname + "), Real(" + testcolumnname + ")," + str(self.acceptancelimit) + ") AND NEARABS(Imag(" + columnname + "), Imag(" + testcolumnname + ")," + str(self.acceptancelimit) + "))"
            errorcount = pt.taql(taqlcmd).nrows()

            if self.verbose or self.debug:
                print "errorcount = ", errorcount     # number of differing rows

            # Any discrepancy between test and reference columns fails the test
            if errorcount > 0:
                passed = False

        return passed
Example #18
def updatehistory(outms):
  """
  Update history to show that this script has modified original data
  """
  tc = pt.table(outms, readonly=False)
  th = pt.table(tc.getkeyword('HISTORY'), readonly=False, ack=False)
  nr = th.nrows()
  th.addrows(1)
  tr = th.row()
  tr.put(nr, {'TIME': quantity('today').get('s').get_value(),
              'OBSERVATION_ID': 0, 'MESSAGE': ' ', 'PRIORITY': ' ',
              'ORIGIN': ' ', 'OBJECT_ID': 0, 'APPLICATION': 'mslin2circ',
              'CLI_COMMAND': [''], 'APP_PARAMS': ['']})
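A call sketch, assuming quantity comes from pyrap.quanta as in the updateMSmetadata example above:

import pyrap.tables as pt
from pyrap.quanta import quantity

updatehistory('output.MS')   # hypothetical MS path; appends one HISTORY row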
Example #19
def getantlist(self):
    print 'Listing antennas in MS ' + self.inputEntry.get() + '\n'
    ttmp = pt.table(self.inputEntry.get(), readonly=True, ack=False)
    tant = pt.table(ttmp.getkeyword('ANTENNA'), readonly=True, ack=False)
    antlist = tant.getcol('NAME')
    self.antList.delete(0, END)
    ninserted = 0
    for ant in antlist:
        self.antList.insert(END, ant)
        self.antList.selection_set(ninserted)
        ninserted += 1
Example #20
def updateObsTable (image, msName, minbl, maxbl, aswvl,
                    usedCounts, visCounts, minTime, maxTime, totTime):
    obstab = pt.table (image.name() + "/LOFAR_OBSERVATION", readonly=False,
                       ack=False)
    oritab = pt.table (image.name() + "/LOFAR_ORIGIN", ack=False)
    minfreq = pt.taql ("calc min([select FREQUENCY_MIN from '" +
                       oritab.name() + "'])")
    maxfreq = pt.taql ("calc max([select FREQUENCY_MAX from '" +
                       oritab.name() + "'])") 
    obstab.putcell ("OBSERVATION_FREQUENCY_MIN", 0, minfreq[0]);
    obstab.putcell ("OBSERVATION_FREQUENCY_MAX", 0, maxfreq[0]);
    obstab.putcell ("OBSERVATION_FREQUENCY_CENTER", 0, (minfreq[0]+maxfreq[0])/2);
    obstab.putcell ("OBSERVATION_INTEGRATION_TIME", 0, totTime);
    obstab.putcell ("OBSERVATION_START", 0, minTime);
    obstab.putcell ("OBSERVATION_END", 0, maxTime);
    obstab.putcell ("TIME_RANGE", 0, (minTime, maxTime));
    obstab.putcell ("FILENAME", 0, os.path.basename(image.name()))
    obstab.putcell ("FILETYPE", 0, "sky")
    pt.taql ("update '" + obstab.name() + "' set FILEDATE = mjd(date()), " +
             "RELEASE_DATE = mjd(date()+365)")
    # Determine minimum and maximum baseline length
    # If needed, convert from wavelengths to meters.
    mstab = pt.table(msName, ack=False)
    if aswvl:
        minbl *= 2.99792e8 / maxfreq[0]
        maxbl *= 2.99792e8 / minfreq[0]
    if minbl <= 0:
        mbl = pt.taql ("calc sqrt(min([select sumsqr(UVW[:2]) from " + msName + "]))")
        minbl = max(mbl[0], abs(minbl))
    if maxbl <= 0:
        mbl = pt.taql ("calc sqrt(max([select sumsqr(UVW[:2]) from " + msName + "]))")
        if maxbl == 0:
            maxbl = mbl[0]
        else:
            maxbl = min(mbl[0], abs(maxbl))
    mstab.close()
    # Add and fill a few extra columns.
    col1 = pt.makescacoldesc ("MIN_BASELINE_LENGTH", 0, valuetype='double')
    col2 = pt.makescacoldesc ("MAX_BASELINE_LENGTH", 0, valuetype='double')
    col3 = pt.makearrcoldesc ("NVIS_USED", 0, valuetype='int')
    col4 = pt.makearrcoldesc ("NVIS_TOTAL", 0, valuetype='int')
    obstab.addcols (pt.maketabdesc ([col1, col2, col3, col4]))
    obstab.putcolkeyword ("MIN_BASELINE_LENGTH", "QuantumUnits", ["m"])
    obstab.putcolkeyword ("MAX_BASELINE_LENGTH", "QuantumUnits", ["m"])
    obstab.putcell ("MIN_BASELINE_LENGTH", 0, minbl)
    obstab.putcell ("MAX_BASELINE_LENGTH", 0, maxbl)
    # Get sum for all MSs.
    tusedCounts = usedCounts.sum (axis=0)
    tvisCounts  =  visCounts.sum (axis=0)
    obstab.putcell ("NVIS_USED", 0, tusedCounts)
    obstab.putcell ("NVIS_TOTAL", 0, tvisCounts)
    obstab.close()
    oritab.close()
    print "Updated subtable LOFAR_OBSERVATION"
Example #21
    def putInMS(self):
        MS=self.MS
        MS1name=self.MS1name
        
        from pyrap.tables import table
        MSout=ClassMS.ClassMS(MS1name,Col=self.Cols)
        
        idx=0
        AntMap=np.zeros((MS.na,MS.na),dtype=np.int32)
        for i in range(MS.na):
            for j in range(i,MS.na):
                #AntMap[i,j]=idx
                AntMap[MSout.A0[idx],MSout.A1[idx]]=idx
                idx+=1

        t0=MS.times_all[0]
        it=np.int64(np.round((MS.times_all-t0)/self.Dt))*MSout.nbl
        Rowmap=it+AntMap[MS.A0,MS.A1]
        
        indIn=np.arange(MS.uvw.shape[0])
        MSout.flag_all.fill(1)
        
        #replace:
        MSout.uvw[Rowmap,:]=MS.uvw[:,:]
        #MSout.times_all[Rowmap]=MS.times_all[:]
        MSout.A0[Rowmap]=MS.A0[:]
        MSout.A1[Rowmap]=MS.A1[:]
        
        if not self.RevertFreqs:
            for i in range(len(self.Cols)):
                MSout.data[i][Rowmap,:,:]=MS.data[i][:,:,:]
            MSout.flag_all[Rowmap,:,:]=MS.flag_all[:,:,:]
        else:
            for i in range(len(self.Cols)):
                MSout.data[i][Rowmap,::-1,:]=MS.data[i][:,:,:]
            MSout.flag_all[Rowmap,::-1,:]=MS.flag_all[:,:,:]

        MSout.SaveAllDataStruct()

        t=table(MS.MSName+"::SPECTRAL_WINDOW",ack=False,readonly=False)
        chanFreqs=t.getcol('CHAN_FREQ')
        chanWidth=t.getcol('CHAN_WIDTH')
        t.close()
        for spw in range(chanFreqs.shape[0]):
            ind=np.argsort(chanFreqs[spw])
            chanFreqs[spw][:]=chanFreqs[spw][ind]
            chanWidth[spw][:]=np.abs(chanWidth[spw][ind])

        t=table(MSout.MSName+"::SPECTRAL_WINDOW",ack=False,readonly=False)
        t.putcol('CHAN_FREQ',chanFreqs)
        t.putcol('CHAN_WIDTH',chanWidth)
        t.close()
Example #22
def find_timeint(ms):
    """
    Get time interval in seconds
    """
    import pyrap.tables as tb
    t = tb.table(ms, ack=False)
    Ntimes = len(set(t.getcol('TIME')))
    t.close()
    t = tb.table(ms+'/OBSERVATION', ack=False)
    deltat = (t.getcol('TIME_RANGE')[0][1]-t.getcol('TIME_RANGE')[0][0])/Ntimes
    t.close()
    logging.debug('Time interval for '+ms+': '+str(deltat))
    return deltat
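A usage sketch with a placeholder path; the interval is the observation's TIME_RANGE span divided by the number of unique timestamps:

dt = find_timeint('L123456_SB000.MS')   # hypothetical MS
print 'time interval: %.2f s' % dt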
Example #23
def RotateMS(self, radec):
    import ModRotate
    ModRotate.Rotate(self, radec)
    ta = table(self.MSName + '/FIELD/', ack=False, readonly=False)
    ra, dec = radec
    radec = np.array([[[ra, dec]]])
    ta.putcol("DELAY_DIR", radec)
    ta.putcol("PHASE_DIR", radec)
    ta.putcol("REFERENCE_DIR", radec)
    ta.close()
    t = table(self.MSName, ack=False, readonly=False)
    t.putcol(self.ColName, self.data)
    t.putcol("UVW", self.uvw)
    t.close()
Example #24
def __init__(self, ms, times=[], direction=[]):
    myt = tab.table(ms + '/LOFAR_ANTENNA_FIELD')
    self.flags = myt.getcol("ELEMENT_FLAG")
    self.offsets = myt.getcol("ELEMENT_OFFSET")
    myt = tab.table(ms + '/ANTENNA')
    self.fieldcenter = myt.getcol("LOFAR_PHASE_REFERENCE")
    self.stationcenter = myt.getcol("POSITION")
    self.offsetshift = self.fieldcenter - self.stationcenter
    self.ms = ms
    self.refdir = tab.table(ms + '/FIELD').getcol('PHASE_DIR')[0][0]
    itrfdir = []
    self.times = times
    self.direction = direction
    self.initiated = False
    self.init_times()
Example #25
def getdatainfo(ms):
    t1 = pt.table("{0}.img.restored.corr".format(ms), ack=False)
    restbw = t1.getkeywords()['coords']['spectral2']['wcs']['cdelt']
    t1.close()
    t1 = pt.table("{0}/OBSERVATION".format(ms), ack=False)
    thisendtime = t1.getcell('LOFAR_OBSERVATION_END', 0)
    thisantenna = t1.getcell('LOFAR_ANTENNA_SET', 0)
    t1.close()
    table = pt.table("{0}.img.restored.corr".format(ms), ack=False)
    subtables = open_subtables(table)
    ncore, nremote, nintl = parse_stations(subtables)
    subbandwidth = parse_subbandwidth(subtables)
    subbands = parse_subbands(subtables)
    close_subtables(subtables)
    return restbw, thisendtime, thisantenna, ncore, nremote, nintl, subbandwidth, subbands
Example #26
def getdatainfo(ms, imagename):
    t1 = pt.table("{0}.restored.corr".format(imagename), ack=False)
    restbw = t1.getkeywords()["coords"]["spectral2"]["wcs"]["cdelt"]
    t1.close()
    t1 = pt.table("{0}/OBSERVATION".format(ms), ack=False)
    thisendtime = t1.getcell("LOFAR_OBSERVATION_END", 0)
    thisantenna = t1.getcell("LOFAR_ANTENNA_SET", 0)
    t1.close()
    table = pt.table("{0}.restored.corr".format(imagename), ack=False)
    subtables = open_subtables(table)
    ncore, nremote, nintl = parse_stations(subtables)
    subbandwidth = parse_subbandwidth(subtables)
    subbands = parse_subbands(subtables)
    close_subtables(subtables)
    return restbw, thisendtime, thisantenna, ncore, nremote, nintl, subbandwidth, subbands
Example #27
def grab_coo_MS(MS):
    """
    Read the coordinates of a field from one MS corresponding to the selection given in the parameters

    Parameters
    ----------
    MS : str
        Full name (with path) to one MS of the field

    Returns
    -------
    RA, Dec : tuple
        coordinates of the field (RA, Dec in deg, J2000)
    """

    # reading the coordinates ("position") from the MS
    # NB: they are given in rad,rad (J2000)
    [[[ra, dec]]] = pt.table(MS + '::FIELD', readonly=True, ack=False).getcol('PHASE_DIR')

    # RA is stored in the MS in [-pi;pi]
    # => shift negative angles before the conversion to deg (so that RA is in [0;2pi])
    if ra < 0:
        ra = ra + 2 * np.pi

    # convert radians to degrees
    ra_deg = ra / np.pi * 180.
    dec_deg = dec / np.pi * 180.

    # return the coordinates in deg
    return ra_deg, dec_deg
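A usage sketch (the path is a placeholder):

ra, dec = grab_coo_MS('/data/L123456_SB000.MS')
print 'field centre: RA = %.5f deg, Dec = %.5f deg' % (ra, dec)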
Example #28
def plotflags (tabnames):
    """Plot NDPPP Count results

    A flagging or count step in NDPPP can save the flagging percentages per
    frequency or station. They are saved in a table (per subband) with the
    extension ''.flagfreq'' or ''.flagstat''.
    The flag percentages of a subband can be plotted by giving the name of
    the table containing the results.

    It is also possible to plot the results of multiple subbands by giving
    a list of table names. Frequency results will be sorted in order of
    frequency, while station results are averaged over the subbands.

    """
    t = pt.table(tabnames)
    if 'Frequency' in t.colnames():
        t1 = t.sort ('Frequency')
        pylab.plot (t1.getcol('Frequency'), t1.getcol('Percentage'))
    elif 'Station' in t.colnames():
        percs = []
        names = []
        for t1 in t.iter ('Station'):
            percs.append (t1.getcol('Percentage').mean())
            names.append (t1.getcell('Name', 0))
        # station names are collected but only the percentages are plotted
        pylab.plot (numpy.array(percs), '+')
    else:
        raise RuntimeError('Table appears not to be an NDPPP Count result; '
                           'it does not contain a Frequency or Station column')
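A usage sketch, assuming per-subband count tables written by an NDPPP flag/count step (the glob pattern is a placeholder); pyrap.tables accepts the list of names and concatenates the tables:

import glob
import pylab

plotflags(sorted(glob.glob('L123456_SB*.flagfreq')))
pylab.show()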
Example #29
def get_frequencies(self):
    table = pt.table(os.path.join(self.filename, 'SPECTRAL_WINDOW'), ack=False)
    frequencies = table.getcol('CHAN_FREQ')[0]
    nchannels = table.getcol('NUM_CHAN')[0]
    channel_width = table.getcol('CHAN_WIDTH')[0][0]
    return {'frequencies': frequencies, 'nchannels': nchannels,
            'width': channel_width}
Example #30
def create_mosaic(snap, band_nums, chosen_environ, pad):
    for b in band_nums:
        tocorrect = sorted(glob.glob(os.path.join(snap, "images", "L*_SAP00?_BAND0{0}.MS.dppp.img_mosaic0.avgpb".format(b))))
        for w in tocorrect:
            wname = w.split("/")[-1]
            if chosen_environ == 'rsm-mainline' and pad > 1.0:
                log.info("Correcting {0} mosaic padding...".format(wname))
                avgpb = pt.table("{0}".format(w), ack=False, readonly=False)
                coordstable = avgpb.getkeyword('coords')
                coordstablecopy = coordstable.copy()
                value1 = coordstablecopy['direction0']['crpix'][0]
                value2 = coordstablecopy['direction0']['crpix'][1]
                # scale the reference pixel by the padding factor
                value1 *= pad
                value2 *= pad
                newcrpix = np.array([value1, value2])
                coordstablecopy['direction0']['crpix'] = newcrpix
                avgpb.putkeyword('coords', coordstablecopy)
                avgpb.close()
            log.info("Zeroing corners of avgpb {0}...".format(wname))
            subprocess.call("python /home/as24v07/scripts/avgpbz.py {0} > {1}/logs/avgpbz_{2}_log.txt 2>&1".format(w, snap, wname), shell=True)
        tomosaic = sorted(glob.glob(os.path.join(snap, "{0}_SAP00?_BAND0{1}.MS.dppp".format(snap, b))))
        log.info("Creating {0} BAND0{1} Mosaic...".format(snap, b))
        m_list = [i.split("/")[0] + "/images/" + i.split("/")[-1] + ".img_mosaic" for i in tomosaic]
        m_name = os.path.join(snap, "images", "{0}_BAND0{1}_mosaic.fits".format(snap, b))
        m_sens_name = os.path.join(snap, "images", "{0}_BAND0{1}_mosaic_sens.fits".format(snap, b))
        subprocess.call("python /home/as24v07/scripts/mos.py -o {0} -a avgpbz -s {1} {2} > {3}/logs/mosaic_band0{4}_log.txt 2>&1".format(m_name, m_sens_name, ",".join(m_list), snap, b), shell=True)
Example #31
def main(data_dir, working_dir, obs_num, new_weights_col):
    msfiles = glob.glob(os.path.join(data_dir, 'L{}*.ms'.format(obs_num)))
    if len(msfiles) == 0:
        raise IOError("No msfiles")
    mslist_file = os.path.join(working_dir, 'mslist.txt')
    with open(mslist_file, 'w') as f:
        for ms in msfiles:
            f.write("{}\n".format(ms))
    merged_h5parm = os.path.join(
        data_dir, 'L{}_{}_merged.h5'.format(obs_num, 'DDS5_full'))
    with tables.open_file(merged_h5parm) as datapack:
        root = getattr(datapack, "root")
        sol000 = getattr(root, "sol000")
        tec_outliers_soltab = getattr(sol000, "tec_outliers000")
        times = tec_outliers_soltab.time[:]
        antennas = np.array(tec_outliers_soltab.ant[:])
        # Npol, Nd, Na, Nt
        outliers = tec_outliers_soltab.val[...]
        Npol, Nd, Na, Nt = outliers.shape
    # Na, Nt
    flags = np.mean(outliers[0, ...], axis=0)  # > Nd * outlier_frac_thresh

    # plotting some things
    frac_flagged = np.sum(flags, axis=0) / float(Na)
    plt.plot(frac_flagged)
    plt.xlabel('Time')
    plt.ylabel("Frac flagged [1]")
    plt.ylim(0., 1.)
    plt.savefig(os.path.join(working_dir, "frac_flagged_per_time.png"))
    plt.savefig(os.path.join(working_dir, "frac_flagged_per_time.pdf"))
    plt.close('all')

    frac_flagged = np.sum(flags, axis=1) / float(Nt)
    plt.plot(frac_flagged)
    plt.xlabel('Antenna index')
    plt.ylabel("Frac flagged [1]")
    plt.ylim(0., 1.)
    plt.savefig(os.path.join(working_dir, "frac_flagged_per_ant.png"))
    plt.savefig(os.path.join(working_dir, "frac_flagged_per_ant.pdf"))
    plt.close('all')

    frac_outliers = np.sum(outliers[0, ...], axis=0) / float(Nd)
    frac_flagged = np.sum(frac_outliers, axis=0) / float(Na)
    plt.plot(frac_flagged)
    plt.xlabel('Time')
    plt.ylabel("Frac flagged [1]")
    plt.ylim(0., 1.)
    plt.savefig(os.path.join(working_dir, "frac_outliers_per_time.png"))
    plt.savefig(os.path.join(working_dir, "frac_outliers_per_time.pdf"))
    plt.close('all')

    frac_flagged = np.sum(frac_outliers, axis=1) / float(Nt)
    plt.plot(frac_flagged)
    plt.xlabel('Antenna index')
    plt.ylabel("Frac flagged [1]")
    plt.ylim(0., 1.)
    plt.savefig(os.path.join(working_dir, "frac_outliers_per_ant.png"))
    plt.savefig(os.path.join(working_dir, "frac_outliers_per_ant.pdf"))
    plt.close('all')

    for ms in msfiles:
        with pt.table(os.path.join(ms, "SPECTRAL_WINDOW")) as t_sw:
            ms_freq = np.mean(t_sw.getcol("CHAN_FREQ"))
        with pt.table(os.path.join(ms, "ANTENNA")) as t_ant:
            ant_names = np.array(t_ant.getcol('NAME'))
            ant_names = ant_names.astype(antennas.dtype)
        ant_map = np.array([list(antennas).index(a) for a in ant_names])
        logger.info("Antenna map from MS to H5parm: {}".format(ant_map))
        with pt.table(ms, readonly=False) as t:
            weights_col = t.getcol("IMAGING_WEIGHT")
            logger.info("Weight col is shape {}".format(weights_col.shape))
            cols = t.colnames()
            if new_weights_col in cols:
                t.removecols(new_weights_col)
            desc = t.getcoldesc("IMAGING_WEIGHT")
            desc['name'] = new_weights_col
            t.addcols(desc)
            logger.info("Created {}".format(new_weights_col))
            # t.putcol(new_weights_col, weights_col)
            # logger.info("Stored original weights")
            vis_ant1 = ant_map[t.getcol('ANTENNA1')]
            vis_ant2 = ant_map[t.getcol('ANTENNA2')]
            vis_times = t.getcol('TIME')
            # indexes closest point in solset
            time_map = np.searchsorted(times, vis_times, side='right')
            new_flags = np.maximum(flags[vis_ant1, time_map], flags[vis_ant2,
                                                                    time_map])
            logger.info("Flagged [{} / {}] baselines ({:.2f}%)".format(
                np.sum(new_flags), new_flags.size,
                100. * (np.sum(new_flags) / float(new_flags.size))))
            new_weights = (
                1. - new_flags[:, None]
            ) * weights_col  # + new_flags[:,None]*0.#np.where(new_flags[:, None], 0., weights_col)
            t.putcol(new_weights_col, new_weights)
            logger.info("Stored flags in {}".format(new_weights_col))
Example #32
import pyrap.tables as pt

oldtable = "/scratch/jason/chiles_original.ms"
newtable = "/scratch/jason/chiles_adios.ms"
t = pt.table(oldtable)
dmdef = t.getdminfo()

print("Original Table: **************************")
for i in dmdef:
    print(i)
    print(dmdef[i])

dmdef["*17"]["TYPE"] = "AdiosStMan"
print("New Table: ***********************")
for i in dmdef:
    print(i)
    print(dmdef[i])

t.copy(newtable, True, True, dminfo=dmdef)
Example #33
    for i in range(0, len(fields)):

        # Define input MS
        in_ms = in_path + '/%s/%s.ms' % (obsid, fields[i])

        # Define rest frequency dictionary
        desc = {
            'name': 'REST_FREQUENCY',
            '_c_order': True,
            'comment': 'Line rest frequency',
            'dataManagerGroup': 'StandardStMan',
            'dataManagerType': 'StandardStMan',
            'keywords': {
                'MEASINFO': {
                    'Ref': 'LSRK',
                    'type': 'frequency'
                },
                'QuantumUnits': ['Hz']
            },
            'maxlen': 0,
            'ndim': -1,
            'option': 0,
            'valueType': 'double'
        }

        # Add rest frequency field to table
        tt = table('%s/SOURCE' % (in_ms), readonly=False)
        if 'REST_FREQUENCY' not in tt.colnames():
            tt.addcols(desc)
        tt.done()
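Once the column exists, a rest frequency can be written per SOURCE row; a hedged sketch to continue inside the same loop (the HI line value is just an example, and numpy is assumed imported):

        tt = table('%s/SOURCE' % (in_ms), readonly=False)
        tt.putcell('REST_FREQUENCY', 0, numpy.array([1.420405752e9]))  # HI line, in Hz
        tt.done()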
Example #34
import numpy
import glob
import subprocess
from pyrap.tables import table

mslist = glob.glob('*wtspec.ms')

for myms in mslist:
    dryrun = False

    tt = table(myms, ack=False)
    fields = numpy.unique(tt.getcol('FIELD_ID')).tolist()
    field = fields[0]
    tt.done()

    print myms + ' using field ' + str(field)

    # wsclean myms suffix column field automask updatemodel dryrun
    cc = 'python run_wsclean.py ' + myms + ' data DATA ' + str(
        field) + ' True True ' + str(dryrun)
    subprocess.call(cc, shell=True)

    # cubical myms parset prefix field dryrun
    cc = 'python run_cubical.py ' + myms + ' askap-phasecal.parset pcal ' + str(
        field) + ' ' + str(dryrun)
    subprocess.call(cc, shell=True)

    # wsclean myms suffix column field automask updatemodel dryrun
    cc = 'python run_wsclean.py ' + myms + ' pcal CORRECTED_DATA ' + str(
        field) + ' True True ' + str(dryrun)
    subprocess.call(cc, shell=True)
Example #35
    def _msss_mask(self, mask_file_path, sourcedb_path, mask_patch_size=1.0):
        """
        Fill casa image with a mask based on skymodel(sourcedb)
        Bugs: [email protected]

        pipeline implementation [email protected]
        version 0.32

        Edited by JDS, 2012-03-16:
         - Properly convert maj/minor axes to half length
         - Handle empty fields in sky model by setting them to 0
         - Fix off-by-one error at mask boundary

        FIXED BUG
         - if a source is outside the mask, the script ignores it
         - if a source is on the border, the script draws only the inner part
         - can handle skymodels with different headers

        KNOWN BUG
         - does not work with single-line skymodels; workaround: add a fake
           source outside the field
         - mask patches display large amounts of aliasing. A possible
           solution would be normalizing to the pixel centre:
           ( int(normalize_x * npix) / npix + (0.5 / npix) );
           ideally the patch would increment in pixel radii

        Version 0.3  (Wouter Klijn, [email protected])
         - Usage of sourcedb instead of txt document as 'source' of sources
           This allows input from different source sources
        Version 0.31  (Wouter Klijn, [email protected])
         - Adaptable patch size (patch size needs specification)
         - Patch size and geometry is broken: needs some astronomer magic to
           fix it, problem with the affine transformation probably.
        Version 0.32 (Wouter Klijn, [email protected])
         - Renaming of variable names to python convention
        """
        # increment in maj/minor axes [arcsec]
        pad = 500.

        # open mask
        mask = pim.image(mask_file_path, overwrite=True)
        mask_data = mask.getdata()
        xlen, ylen = mask.shape()[2:]
        freq, stokes, null, null = mask.toworld([0, 0, 0, 0])

        # Open the sourcedb:
        table = pt.table(sourcedb_path + "::SOURCES")
        pdb = lofar.parmdb.parmdb(sourcedb_path)

        # Get the data of interest
        source_list = table.getcol("SOURCENAME")
        source_type_list = table.getcol("SOURCETYPE")
        # All data is in the format valuetype:sourcename
        all_values_dict = pdb.getDefValues()

        # Loop the sources
        for source, source_type in zip(source_list, source_type_list):
            if source_type == 1:
                type_string = "Gaussian"
            else:
                type_string = "Point"
            self.logger.info("processing: {0} ({1})".format(
                source, type_string))

            # Get the right_ascension and declination (already in radians)
            right_ascension = all_values_dict["Ra:" + source][0, 0]
            declination = all_values_dict["Dec:" + source][0, 0]
            if source_type == 1:
                # Get the raw values from the db
                maj_raw = all_values_dict["MajorAxis:" + source][0, 0]
                min_raw = all_values_dict["MinorAxis:" + source][0, 0]
                pa_raw = all_values_dict["Orientation:" + source][0, 0]
                # convert to radians (conversion is copy paste JDS)
                # major radius (+pad) in rad
                maj = (((maj_raw + pad)) / 3600.) * np.pi / 180.
                # minor radius (+pad) in rad
                minor = (((min_raw + pad)) / 3600.) * np.pi / 180.
                pix_asc = pa_raw * np.pi / 180.
                # wenss always writes 'GAUSSIAN', even for point sources
                # -> set to wenss beam + pad
                if maj == 0 or minor == 0:
                    maj = ((54. + pad) / 3600.) * np.pi / 180.
                    minor = ((54. + pad) / 3600.) * np.pi / 180.
            # set to wenss beam+pad
            elif source_type == 0:
                maj = (((54. + pad) / 2.) / 3600.) * np.pi / 180.
                minor = (((54. + pad) / 2.) / 3600.) * np.pi / 180.
                pix_asc = 0.
            else:
                self.logger.info("WARNING: unknown source source_type ({0}),"
                                 "ignoring: ".format(source_type))
                continue

            # define a small square around the source to look for it
            null, null, border_y1, border_x1 = mask.topixel([
                freq, stokes, declination - maj,
                right_ascension - maj / np.cos(declination - maj)
            ])
            null, null, border_y2, border_x2 = mask.topixel([
                freq, stokes, declination + maj,
                right_ascension + maj / np.cos(declination + maj)
            ])
            xmin = np.int(np.floor(np.min([border_x1, border_x2])))
            xmax = np.int(np.ceil(np.max([border_x1, border_x2])))
            ymin = np.int(np.floor(np.min([border_y1, border_y2])))
            ymax = np.int(np.ceil(np.max([border_y1, border_y2])))

            if xmin > xlen or ymin > ylen or xmax < 0 or ymax < 0:
                self.logger.info("WARNING: source {0} falls outside the mask,"
                                 " ignoring: ".format(source))
                continue

            if xmax > xlen or ymax > ylen or xmin < 0 or ymin < 0:
                self.logger.info(
                    "WARNING: source {0} falls across map edge".format(source))

            for pixel_x in xrange(xmin, xmax):
                for pixel_y in xrange(ymin, ymax):
                    # skip pixels outside the mask field
                    if pixel_x >= xlen or pixel_y >= ylen or\
                       pixel_x < 0 or pixel_y < 0:
                        continue
                    # get pixel right_ascension and declination in rad
                    null, null, pix_dec, pix_ra = mask.toworld(
                        [0, 0, pixel_y, pixel_x])
                    # Translate and rotate coords.
                    translated_pixel_x = (pix_ra - right_ascension) * np.sin(
                        pix_asc) + (pix_dec - declination) * np.cos(pix_asc)
                    # to align with ellipse
                    translate_pixel_y = -(pix_ra - right_ascension) * np.cos(
                        pix_asc) + (pix_dec - declination) * np.sin(pix_asc)
                    if (((translated_pixel_x ** 2) / (maj ** 2)) +
                        ((translate_pixel_y ** 2) / (minor ** 2))) < \
                                                         mask_patch_size:
                        mask_data[0, 0, pixel_y, pixel_x] = 1
        mask.putdata(mask_data)
        table.close()
Example #36
def main(ms_input,
         outmapname=None,
         mapfile_dir=None,
         cellsize_highres_deg=0.00208,
         cellsize_lowres_deg=0.00694,
         fieldsize_highres=2.5,
         fieldsize_lowres=6.5,
         image_padding=1.,
         y_axis_stretch=1.):
    """
    Check a list of MS files for missing frequencies

    Parameters
    ----------
    ms_input : list or str
        List of MS filenames, or string with list, or path to a mapfile
    outmapname: str
        Name of output mapfile
    mapfile_dir : str
        Directory for output mapfile
    cellsize_highres_deg : float, optional
        cellsize for the high-res images in deg
    cellsize_lowres_deg : float, optional
        cellsize for the low-res images in deg
    fieldsize_highres : float, optional
        How many FWHM's shall the high-res images be.
    fieldsize_lowres : float, optional
        How many FWHM's shall the low-res images be.
    image_padding : float, optional
        How much padding shall we add to the padded image sizes.
    y_axis_stretch : float, optional
        How much shall the y-axis be stretched or compressed. 

    Returns
    -------
    result : dict
        Dict with the name of the generated mapfiles

    """

    if not outmapname or not mapfile_dir:
        raise ValueError(
            'sort_times_into_freqGroups: outmapname and mapfile_dir are needed!'
        )
    if type(ms_input) is str:
        if ms_input.startswith('[') and ms_input.endswith(']'):
            ms_list = [
                f.strip(' \'\"') for f in ms_input.strip('[]').split(',')
            ]
        else:
            map_in = DataMap.load(ms_input)
            map_in.iterator = DataMap.SkipIterator
            ms_list = []
            for fname in map_in:
                if fname.startswith('[') and fname.endswith(']'):
                    for f in fname.strip('[]').split(','):
                        ms_list.append(f.strip(' \'\"'))
                else:
                    ms_list.append(fname.strip(' \'\"'))
    elif type(ms_input) is list:
        ms_list = [str(f).strip(' \'\"') for f in ms_input]
    else:
        raise TypeError('sort_into_freqBands: type of "ms_input" unknown!')

    cellsize_highres_deg = float(cellsize_highres_deg)
    cellsize_lowres_deg = float(cellsize_lowres_deg)
    fieldsize_highres = float(fieldsize_highres)
    fieldsize_lowres = float(fieldsize_lowres)
    image_padding = float(image_padding)
    y_axis_stretch = float(y_axis_stretch)

    msdict = {}
    for ms in ms_list:
        # group all MSs by frequency
        sw = pt.table(ms + '::SPECTRAL_WINDOW', ack=False)
        msfreq = int(sw.col('REF_FREQUENCY')[0])
        sw.close()
        if msfreq in msdict:
            msdict[msfreq].append(ms)
        else:
            msdict[msfreq] = [ms]
    bands = []
    bandfreqs = []
    print "InitSubtract_sort_and_compute.py: Putting files into bands."
    for MSkey in msdict.keys():
        bands.append(Band(msdict[MSkey]))
        bandfreqs.append(Band(msdict[MSkey]).freq)

    ## min freq gives largest image size for deep image
    bandfreqs = np.array(bandfreqs)
    minfreq = np.min(bandfreqs)
    bandmin = np.argmin(bandfreqs)
    ## need to map the output from wsclean channels to the right frequencies
    ## just put the bands in the right freq order
    wsclean_channum = np.argsort(bandfreqs)
    bands = np.array(bands)
    bands = bands[wsclean_channum]

    #minfreq = 1e9
    #for ib, band in enumerate(bands):
    #if band.freq < minfreq:
    #minfreq = band.freq
    #bandmin = ib

    group_map = MultiDataMap()
    file_single_map = DataMap([])
    high_size_map = DataMap([])
    low_size_map = DataMap([])
    high_paddedsize_map = DataMap([])
    low_paddedsize_map = DataMap([])
    numfiles = 0
    nbands = len(bands)
    if nbands > 8:
        nchansout_clean1 = np.int(nbands / 4)
    elif nbands > 4:
        nchansout_clean1 = np.int(nbands / 2)
    else:
        nchansout_clean1 = np.int(nbands)

    (freqstep, timestep) = bands[0].get_averaging_steps()
    (nwavelengths_high,
     nwavelengths_low) = bands[0].nwavelengths(cellsize_highres_deg,
                                               cellsize_lowres_deg, timestep)
    for band in bands:
        print "InitSubtract_sort_and_compute.py: Working on Band:", band.name
        group_map.append(MultiDataProduct('localhost', band.files, False))
        numfiles += len(band.files)
        for filename in band.files:
            file_single_map.append(DataProduct('localhost', filename, False))
        (imsize_high_res, imsize_low_res) = band.get_image_sizes(
            cellsize_highres_deg, cellsize_lowres_deg, fieldsize_highres,
            fieldsize_lowres)
        imsize_high_res_stretch = band.get_optimum_size(
            int(imsize_high_res * y_axis_stretch))
        high_size_map.append(
            DataProduct(
                'localhost',
                str(imsize_high_res) + " " + str(imsize_high_res_stretch),
                False))
        imsize_low_res_stretch = band.get_optimum_size(
            int(imsize_low_res * y_axis_stretch))
        low_size_map.append(
            DataProduct(
                'localhost',
                str(imsize_low_res) + " " + str(imsize_low_res_stretch),
                False))
        imsize_high_pad = band.get_optimum_size(
            int(imsize_high_res * image_padding))
        imsize_high_pad_stretch = band.get_optimum_size(
            int(imsize_high_res * image_padding * y_axis_stretch))
        high_paddedsize_map.append(
            DataProduct(
                'localhost',
                str(imsize_high_pad) + " " + str(imsize_high_pad_stretch),
                False))
        imsize_low_pad = band.get_optimum_size(
            int(imsize_low_res * image_padding))
        imsize_low_pad_stretch = band.get_optimum_size(
            int(imsize_low_res * image_padding * y_axis_stretch))
        low_paddedsize_map.append(
            DataProduct(
                'localhost',
                str(imsize_low_pad) + " " + str(imsize_low_pad_stretch),
                False))

        print band.freq / 1e6, imsize_high_res, imsize_high_res_stretch, imsize_high_pad, imsize_high_pad_stretch, imsize_low_res, imsize_low_res_stretch, imsize_low_pad, imsize_low_pad_stretch, nwavelengths_high, nwavelengths_low

        if band.freq == minfreq:
            deep_imsize_high_res = imsize_high_res
            deep_imsize_high_res_stretch = imsize_high_res_stretch
            deep_imsize_high_pad = imsize_high_pad
            deep_imsize_high_pad_stretch = imsize_high_pad_stretch
            deep_imsize_low_res = imsize_low_res
            deep_imsize_low_res_stretch = imsize_low_res_stretch
            deep_imsize_low_pad = imsize_low_pad
            deep_imsize_low_pad_stretch = imsize_low_pad_stretch

            print '*', band.freq / 1e6, imsize_high_res, imsize_high_res_stretch, imsize_high_pad, imsize_high_pad_stretch, imsize_low_res, imsize_low_res_stretch, imsize_low_pad, imsize_low_pad_stretch

    deep_high_size_map = DataMap([
        DataProduct(
            'localhost',
            str(deep_imsize_high_res) + " " +
            str(deep_imsize_high_res_stretch), False)
    ])
    deep_high_paddedsize_map = DataMap([
        DataProduct(
            'localhost',
            str(deep_imsize_high_pad) + " " +
            str(deep_imsize_high_pad_stretch), False)
    ])
    deep_low_size_map = DataMap([
        DataProduct(
            'localhost',
            str(deep_imsize_low_res) + " " + str(deep_imsize_low_res_stretch),
            False)
    ])
    deep_low_paddedsize_map = DataMap([
        DataProduct(
            'localhost',
            str(deep_imsize_low_pad) + " " + str(deep_imsize_low_pad_stretch),
            False)
    ])
    nbands_map = DataMap([DataProduct('localhost', str(nbands), False)])
    nchansout_clean1_map = DataMap(
        [DataProduct('localhost', str(nchansout_clean1), False)])
    print "InitSubtract_sort_and_compute.py: Computing averaging steps."

    # get mapfiles for freqstep and timestep with the length of single_map
    freqstep_map = DataMap([])
    timestep_map = DataMap([])
    nwavelengths_high_map = DataMap([])
    nwavelengths_low_map = DataMap([])

    for index in xrange(numfiles):
        freqstep_map.append(DataProduct('localhost', str(freqstep), False))
        timestep_map.append(DataProduct('localhost', str(timestep), False))
    nwavelengths_high_map.append(
        DataProduct('localhost', str(nwavelengths_high), False))
    nwavelengths_low_map.append(
        DataProduct('localhost', str(nwavelengths_low), False))

    groupmapname = os.path.join(mapfile_dir, outmapname)
    group_map.save(groupmapname)
    file_single_mapname = os.path.join(mapfile_dir, outmapname + '_single')
    file_single_map.save(file_single_mapname)

    high_sizename = os.path.join(mapfile_dir, outmapname + '_high_size')
    high_size_map.save(high_sizename)
    low_sizename = os.path.join(mapfile_dir, outmapname + '_low_size')
    low_size_map.save(low_sizename)
    high_padsize_name = os.path.join(mapfile_dir,
                                     outmapname + '_high_padded_size')
    high_paddedsize_map.save(high_padsize_name)
    low_padsize_name = os.path.join(mapfile_dir,
                                    outmapname + '_low_padded_size')
    low_paddedsize_map.save(low_padsize_name)

    deep_high_sizename = os.path.join(mapfile_dir,
                                      outmapname + '_deep_high_size')
    deep_high_size_map.save(deep_high_sizename)
    deep_low_sizename = os.path.join(mapfile_dir,
                                     outmapname + '_deep_low_size')
    deep_low_size_map.save(deep_low_sizename)
    deep_high_padsize_name = os.path.join(
        mapfile_dir, outmapname + '_deep_high_padded_size')
    deep_high_paddedsize_map.save(deep_high_padsize_name)
    deep_low_padsize_name = os.path.join(mapfile_dir,
                                         outmapname + '_deep_low_padded_size')
    deep_low_paddedsize_map.save(deep_low_padsize_name)

    nbands_mapname = os.path.join(mapfile_dir, outmapname + '_nbands')
    nbands_map.save(nbands_mapname)
    nchansout_clean1_mapname = os.path.join(mapfile_dir,
                                            outmapname + '_nchansout_clean1')
    nchansout_clean1_map.save(nchansout_clean1_mapname)

    freqstepname = os.path.join(mapfile_dir, outmapname + '_freqstep')
    freqstep_map.save(freqstepname)
    timestepname = os.path.join(mapfile_dir, outmapname + '_timestep')
    timestep_map.save(timestepname)
    nwavelengths_high_name = os.path.join(mapfile_dir,
                                          outmapname + '_nwavelengths_high')
    nwavelengths_high_map.save(nwavelengths_high_name)
    nwavelengths_low_name = os.path.join(mapfile_dir,
                                         outmapname + '_nwavelengths_low')
    nwavelengths_low_map.save(nwavelengths_low_name)

    result = {
        'groupmap': groupmapname,
        'single_mapfile': file_single_mapname,
        'high_size_mapfile': high_sizename,
        'low_size_mapfile': low_sizename,
        'high_padsize_mapfile': high_padsize_name,
        'low_padsize_mapfile': low_padsize_name,
        'deep_high_size_mapfile': deep_high_sizename,
        'deep_low_size_mapfile': deep_low_sizename,
        'deep_high_padsize_mapfile': deep_high_padsize_name,
        'deep_low_padsize_mapfile': deep_low_padsize_name,
        'nbands': nbands_mapname,
        'nchansout_clean1': nchansout_clean1_mapname,
        'freqstep': freqstepname,
        'timestep': timestepname,
        'nwavelengths_high_mapfile': nwavelengths_high_name,
        'nwavelengths_low_mapfile': nwavelengths_low_name
    }
    return result
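# A minimal usage sketch (not part of the original plugin): reloading one of
# the mapfiles written above. DataMap.load() and the .host/.file/.skip
# attributes are the standard lofarpipe.support.data_map API already used in
# this module; which mapfile path you pass is up to the caller.
def print_mapfile_entries(mapfile_path):
    data_map = DataMap.load(mapfile_path)
    for item in data_map:
        # for the *_size mapfiles, item.file holds "imsize imsize_stretch"
        print item.host, item.file, item.skip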
Beispiel #37
0
def ProcessSingleMS(ms, kB, tsyseff, tsyseffFile, Aant, selectFieldName):
    print ''
    print '--- Working on file {0:s} ---'.format(ms)
    t = tables.table(ms)
    fieldIDs = t.getcol('FIELD_ID')
    ant1 = t.getcol('ANTENNA1')
    ant2 = t.getcol('ANTENNA2')
    fieldNames = tables.table(ms + '/FIELD').getcol('NAME')
    spw = tables.table(ms + '/SPECTRAL_WINDOW')
    channelWidths = spw.getcol('CHAN_WIDTH')
    channelFreqs = spw.getcol('CHAN_FREQ')

    if selectFieldName:
        try:
            selectFieldID = fieldNames.index(selectFieldName)
        except ValueError:
            print ' CATASTROPHE!'
            print ' Cannot find the field you want to process, {0:s}'.format(
                selectFieldName)
            print ' Available fields are', fieldNames
            print ' Aborting ...'
            sys.exit()
        print 'Successfully selected Field with name {0:s} (Field ID = {1:d})'.format(
            selectFieldName, selectFieldID)
        selection = fieldIDs == selectFieldID
    else:
        print 'Will process all available fields:', fieldNames
        selection = fieldIDs >= fieldIDs.min()

    autoCorr = ant1 == ant2
    if autoCorr.sum(): print 'Found auto-correlations: selecting cross-correlations only'
    else: print 'Found cross-correlations only'
    selection *= ant1 != ant2
    nrAnt = np.unique(np.concatenate((ant1, ant2))).shape[0]
    nrBaseline = nrAnt * (nrAnt - 1) / 2
    print 'Number of antennas  = {0:d}'.format(nrAnt)
    print 'Number of baselines = {0:d}'.format(nrBaseline)
    print 'Frequency coverage  = {0:.5e} Hz - {1:.5e} Hz'.format(
        channelFreqs.min(), channelFreqs.max())
    if np.unique(channelWidths).shape[0] == 1:
        print 'Channel width = {0:.5e} Hz'.format(np.unique(channelWidths)[0])
    else:
        print 'The channel width takes the following unique values:', np.unique(
            channelWidths), 'Hz'

    print 'Loading flags and intervals ...'
    flag = t.getcol('FLAG')[selection]  # flagged data have flag = True
    interval = t.getcol('INTERVAL')[selection]
    if np.unique(interval).shape[0] == 1:
        print 'Interval = {0:.5e} sec'.format(np.unique(interval)[0])
    else:
        print 'The interval takes the following unique values:', np.unique(
            interval), 'sec'
    t.close()

    print 'The *flag* array has shape (Nr_integrations, Nr_channels, Nr_polarisations) =', flag.shape
    print 'The *interval* array has shape (Nr_integrations) =', interval.shape
    print 'The *channel* width array has shape (-, Nr_channels) =', channelWidths.shape

    print 'Total Integration on selected field(s) = {0:.2f} h ({1:d} polarisations)'.format(
        interval.sum() / nrBaseline / 3600, flag.shape[2])
    if tsyseffFile != None:
        rms = np.sqrt(2) * kB * InterpolateTsyseff(
            tsyseff, channelFreqs) / Aant / np.sqrt(
                channelWidths * interval.sum() * flag.shape[2])
    else:
        rms = np.sqrt(2) * kB * tsyseff / Aant / np.sqrt(
            channelWidths * interval.sum() * flag.shape[2])
    if len(rms.shape) == 2 and rms.shape[0] == 1: rms = rms[0]

    print 'The Stokes I theoretical natural rms ignoring flags has median and range:    *** {0:.3e} Jy/b, ({1:.3e} - {2:.3e}) Jy/b ***'.format(
        np.nanmedian(rms), np.nanmin(rms), np.nanmax(rms))

    return flag, interval, channelWidths, channelFreqs, rms
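# Worked numerical example (mine, not from the original): the rms computed
# above is the radiometer equation,
#   rms = sqrt(2) * kB * (Tsys/eta) / (Aant * sqrt(chan_width * t_int * npol)).
# All input values below are assumptions chosen purely for illustration.
def example_single_channel_rms():
    kB = 1380.6                       # Boltzmann constant in Jy m^2 / K
    tsyseff = 25.                     # assumed Tsys/eta in K
    Aant = np.pi * (13.5 / 2.)**2     # assumed 13.5-m dish -> area in m^2
    chan_width = 26.7e3               # assumed channel width in Hz
    t_int = 8. * 3600.                # integration summed over baselines, in s
    npol = 2
    rms = np.sqrt(2) * kB * tsyseff / Aant / np.sqrt(chan_width * t_int * npol)
    print 'single-channel natural rms ~ {0:.3e} Jy/beam'.format(rms)  # ~8.7e-03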
Beispiel #38
0
    inter=interv[0]
else:
    if options.force:
        print("\nForcing concat with different time intervals. Will take first interval as interval to use")
        inter=interv[0]
    else:
        print("\nMeasurement sets intervals are not the same!")
        print("Intervals detected: {0}".format(interv))
        sys.exit()

print("\nInterval = {0}s".format(inter))    

if concat(args, oname):
# if True:
    print("Changing Times on Set...")
    mstochange=pt.table(oname, ack=False, readonly=False)
    amps_time = mstochange.getcol('TIME')
    amps_time_cen = mstochange.getcol('TIME_CENTROID')
    new_time, newtime_cen, start, end=newtimearray(amps_time, amps_time_cen, inter, gap)
    mstochange.putcol('TIME',new_time)
    mstochange.putcol('TIME_CENTROID',newtime_cen)
    mstochange.close()
    mstochange=pt.table(oname+'/OBSERVATION', ack=False, readonly=False)
    mstochange.putcell("LOFAR_OBSERVATION_START", 0, start)
    mstochange.putcell("LOFAR_OBSERVATION_END", 0, start)
    newrange=np.array([start, end])
    mstochange.putcell("TIME_RANGE", 0, newrange)
    mstochange.close()
    inttime=end-start
    print("Start Time: {0}".format(datetime.utcfromtimestamp(quantity('{0}s'.format(start)).to_unix_time())))
    print("NEW End Time: {0}".format(datetime.utcfromtimestamp(quantity('{0}s'.format(end)).to_unix_time())))
Beispiel #39
0
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'Clock':
            h5parm.makeSoltab(solset, 'clock', axesNames=['ant','freq','time'], \
                    axesVals=[ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'TEC':
            h5parm.makeSoltab(solset, 'tec', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Real' or solType == '*Gain:*:Ampl':
            h5parm.makeSoltab(solset, 'amplitude', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Imag' or solType == '*Gain:*:Phase':
            h5parm.makeSoltab(solset, 'phase', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))

    logging.info('Collecting information from the ANTENNA table.')
    antennaTable = pt.table(antennaFile, ack=False)
    antennaNames = antennaTable.getcol('NAME')
    antennaPositions = antennaTable.getcol('POSITION')
    antennaTable.close()
    antennaTable = solset._f_get_child('antenna')
    antennaTable.append(zip(*(antennaNames, antennaPositions)))

    logging.info('Collecting information from the FIELD table.')
    fieldTable = pt.table(fieldFile, ack=False)
    phaseDir = fieldTable.getcol('PHASE_DIR')
    pointing = phaseDir[0, 0, :]
    fieldTable.close()

    sourceTable = solset._f_get_child('source')
    # add the field centre, that is also the direction for Gain and CommonRotationAngle
    sourceTable.append([('pointing', pointing)])
Beispiel #40
0
def test_ms_create(tmp_path, chunks, num_chans, corr_types, sources):
    # Set up
    rs = np.random.RandomState(42)

    ms_path = tmp_path / "create.ms"

    ms_table_name = str(ms_path)
    ant_table_name = "::".join((ms_table_name, "ANTENNA"))
    ddid_table_name = "::".join((ms_table_name, "DATA_DESCRIPTION"))
    pol_table_name = "::".join((ms_table_name, "POLARIZATION"))
    spw_table_name = "::".join((ms_table_name, "SPECTRAL_WINDOW"))
    # SOURCE is an optional MS sub-table
    src_table_name = "::".join((ms_table_name, "SOURCE"))

    ms_datasets = []
    ant_datasets = []
    ddid_datasets = []
    pol_datasets = []
    spw_datasets = []
    src_datasets = []

    # For comparison
    all_data_desc_id = []
    all_data = []

    # Create ANTENNA dataset of 64 antennas
    # Each column in the ANTENNA has a fixed shape so we
    # can represent all rows with one dataset
    na = 64
    position = da.random.random((na, 3)) * 10000
    offset = da.random.random((na, 3))
    names = np.array(['ANTENNA-%d' % i for i in range(na)], dtype=np.object)
    ds = Dataset({
        'POSITION': (("row", "xyz"), position),
        'OFFSET': (("row", "xyz"), offset),
        'NAME': (("row", ), da.from_array(names, chunks=na)),
    })
    ant_datasets.append(ds)

    # Create SOURCE datasets
    for s, (name, direction, rest_freq) in enumerate(sources):
        dask_num_lines = da.full((1, ), len(rest_freq), dtype=np.int32)
        dask_direction = da.asarray(direction)[None, :]
        dask_rest_freq = da.asarray(rest_freq)[None, :]
        dask_name = da.asarray(np.asarray([name], dtype=np.object), chunks=1)
        ds = Dataset({
            "NUM_LINES": (("row", ), dask_num_lines),
            "NAME": (("row", ), dask_name),
            "REST_FREQUENCY": (("row", "line"), dask_rest_freq),
            "DIRECTION": (("row", "dir"), dask_direction),
        })
        src_datasets.append(ds)

    # Create POLARISATION datasets.
    # Dataset per output row required because column shapes are variable
    for r, corr_type in enumerate(corr_types):
        dask_num_corr = da.full((1, ), len(corr_type), dtype=np.int32)
        dask_corr_type = da.from_array(corr_type,
                                       chunks=len(corr_type))[None, :]
        ds = Dataset({
            "NUM_CORR": (("row", ), dask_num_corr),
            "CORR_TYPE": (("row", "corr"), dask_corr_type),
        })

        pol_datasets.append(ds)

    # Create multiple MeerKAT L-band SPECTRAL_WINDOW datasets
    # Dataset per output row required because column shapes are variable
    for num_chan in num_chans:
        dask_num_chan = da.full((1, ), num_chan, dtype=np.int32)
        dask_chan_freq = da.linspace(.856e9,
                                     2 * .856e9,
                                     num_chan,
                                     chunks=num_chan)[None, :]
        dask_chan_width = da.full((1, num_chan), .856e9 / num_chan)

        ds = Dataset({
            "NUM_CHAN": (("row", ), dask_num_chan),
            "CHAN_FREQ": (("row", "chan"), dask_chan_freq),
            "CHAN_WIDTH": (("row", "chan"), dask_chan_width),
        })

        spw_datasets.append(ds)

    # For each cartesian product of SPECTRAL_WINDOW and POLARIZATION
    # create a corresponding DATA_DESCRIPTION.
    # Each column has fixed shape so we handle all rows at once
    spw_ids, pol_ids = zip(
        *product(range(len(num_chans)), range(len(corr_types))))
    dask_spw_ids = da.asarray(np.asarray(spw_ids, dtype=np.int32))
    dask_pol_ids = da.asarray(np.asarray(pol_ids, dtype=np.int32))
    ddid_datasets.append(
        Dataset({
            "SPECTRAL_WINDOW_ID": (("row", ), dask_spw_ids),
            "POLARIZATION_ID": (("row", ), dask_pol_ids),
        }))

    # Now create the associated MS dataset
    for ddid, (spw_id, pol_id) in enumerate(zip(spw_ids, pol_ids)):
        # Infer row, chan and correlation shape
        row = sum(chunks['row'])
        chan = spw_datasets[spw_id].CHAN_FREQ.shape[1]
        corr = pol_datasets[pol_id].CORR_TYPE.shape[1]

        # Create some dask vis data
        dims = ("row", "chan", "corr")
        np_data = (rs.normal(size=(row, chan, corr)) +
                   1j * rs.normal(size=(row, chan, corr))).astype(np.complex64)

        data_chunks = tuple((chunks['row'], chan, corr))
        dask_data = da.from_array(np_data, chunks=data_chunks)
        # Create dask ddid column
        dask_ddid = da.full(row, ddid, chunks=chunks['row'], dtype=np.int32)
        dataset = Dataset({
            'DATA': (dims, dask_data),
            'DATA_DESC_ID': (("row", ), dask_ddid)
        })
        ms_datasets.append(dataset)
        all_data.append(dask_data)
        all_data_desc_id.append(dask_ddid)

    ms_writes = xds_to_table(ms_datasets, ms_table_name, columns="ALL")
    ant_writes = xds_to_table(ant_datasets, ant_table_name, columns="ALL")
    pol_writes = xds_to_table(pol_datasets, pol_table_name, columns="ALL")
    spw_writes = xds_to_table(spw_datasets, spw_table_name, columns="ALL")
    ddid_writes = xds_to_table(ddid_datasets, ddid_table_name, columns="ALL")
    source_writes = xds_to_table(src_datasets, src_table_name, columns="ALL")

    dask.compute(ms_writes, ant_writes, pol_writes, spw_writes, ddid_writes,
                 source_writes)

    # Check ANTENNA table correctly created
    with pt.table(ant_table_name, ack=False) as A:
        assert_array_equal(A.getcol("NAME"), names)
        assert_array_equal(A.getcol("POSITION"), position)
        assert_array_equal(A.getcol("OFFSET"), offset)

        required_desc = pt.required_ms_desc("ANTENNA")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(A.colnames()) == set(required_columns)

    # Check POLARIZATION table correctly created
    with pt.table(pol_table_name, ack=False) as P:
        for r, corr_type in enumerate(corr_types):
            assert_array_equal(P.getcol("CORR_TYPE", startrow=r, nrow=1),
                               [corr_type])
            assert_array_equal(P.getcol("NUM_CORR", startrow=r, nrow=1),
                               [len(corr_type)])

        required_desc = pt.required_ms_desc("POLARIZATION")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(P.colnames()) == set(required_columns)

    # Check SPECTRAL_WINDOW table correctly created
    with pt.table(spw_table_name, ack=False) as S:
        for r, num_chan in enumerate(num_chans):
            assert_array_equal(
                S.getcol("NUM_CHAN", startrow=r, nrow=1)[0], num_chan)
            assert_array_equal(
                S.getcol("CHAN_FREQ", startrow=r, nrow=1)[0],
                np.linspace(.856e9, 2 * .856e9, num_chan))
            assert_array_equal(
                S.getcol("CHAN_WIDTH", startrow=r, nrow=1)[0],
                np.full(num_chan, .856e9 / num_chan))

        required_desc = pt.required_ms_desc("SPECTRAL_WINDOW")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(S.colnames()) == set(required_columns)

    # We should get a cartesian product out
    with pt.table(ddid_table_name, ack=False) as D:
        spw_id, pol_id = zip(
            *product(range(len(num_chans)), range(len(corr_types))))
        assert_array_equal(pol_id, D.getcol("POLARIZATION_ID"))
        assert_array_equal(spw_id, D.getcol("SPECTRAL_WINDOW_ID"))

        required_desc = pt.required_ms_desc("DATA_DESCRIPTION")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(D.colnames()) == set(required_columns)

    with pt.table(src_table_name, ack=False) as S:
        for r, (name, direction, rest_freq) in enumerate(sources):
            assert_array_equal(S.getcol("NAME", startrow=r, nrow=1)[0], [name])
            assert_array_equal(S.getcol("REST_FREQUENCY", startrow=r, nrow=1),
                               [rest_freq])
            assert_array_equal(S.getcol("DIRECTION", startrow=r, nrow=1),
                               [direction])

    with pt.table(ms_table_name, ack=False) as T:
        # DATA_DESC_ID's are all the same shape
        assert_array_equal(T.getcol("DATA_DESC_ID"),
                           da.concatenate(all_data_desc_id))

        # DATA is variably shaped (on DATA_DESC_ID) so we
        # compare each one separately.
        for ddid, data in enumerate(all_data):
            ms_data = T.getcol("DATA", startrow=ddid * row, nrow=row)
            assert_array_equal(ms_data, data)

        required_desc = pt.required_ms_desc()
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        # Check we have the required columns
        assert set(T.colnames()) == required_columns.union(
            ["DATA", "DATA_DESC_ID"])
Beispiel #41
0
## changelog
# W.Williams 2014/11/03  add - to give input/output statistics per channel
# W.Williams 2014/11/03  fix - statistics per correlation
# A.Drabent 2019/07/24   write fraction of flagged data into output file (for prefactor3)

import numpy
import pyrap.tables as pt
import os
import sys

msname = str(sys.argv[1])

cliplevelhba = 5.0
cliplevellba = 50.0

t = pt.table(msname, readonly=False)
data = t.getcol('MODEL_DATA')
flag = t.getcol('FLAG')
freq_tab = pt.table(msname + '::SPECTRAL_WINDOW')
freq = freq_tab.getcol('REF_FREQUENCY')

if freq[0] > 100e6:
    cliplevel = cliplevelhba
else:
    cliplevel = cliplevellba

print('------------------------------')
print('SB Frequency [MHz]', freq[0] / 1e6)
for chan in range(0, numpy.size(data[0, :, 0])):
    print('chan %i : %.5f%% input XX flagged' %
          (chan, 100. * numpy.sum(flag[:, chan, 0] == True) /
Beispiel #42
0
def addOriginTable(image, msNames):
    # Concatenate the OBSERVATION subtables of all MSs.
    obsNames = [name + "/OBSERVATION" for name in msNames]
    obstab = pt.table(obsNames, ack=False)
    # Select and rename the required columns.
    # Some columns are not in the LOFAR_OBSERVATION table. Create them by
    # selecting a similarly typed column and fill them later.
    selstr = "LOFAR_OBSERVATION_ID as OBSERVATION_ID"
    selstr += ",LOFAR_SUB_ARRAY_POINTING as SUB_ARRAY_POINTING"
    selstr += ",LOFAR_SUB_ARRAY_POINTING as SUBBAND"
    selstr += ",LOFAR_SUB_ARRAY_POINTING as NUM_CHAN"
    selstr += ",LOFAR_SUB_ARRAY_POINTING as NTIME_AVG"
    selstr += ",LOFAR_SUB_ARRAY_POINTING as NCHAN_AVG"
    selstr += ",LOFAR_OBSERVATION_FREQUENCY_MIN as CHANNEL_WIDTH"
    selstr += ",LOFAR_OBSERVATION_FREQUENCY_MIN as EXPOSURE"
    selstr += ",LOFAR_OBSERVATION_FREQUENCY_MIN as FREQUENCY_MIN"
    selstr += ",LOFAR_OBSERVATION_FREQUENCY_MAX as FREQUENCY_MAX"
    selstr += ",LOFAR_OBSERVATION_FREQUENCY_CENTER as FREQUENCY_CENTER"
    selstr += ",LOFAR_OBSERVATION_START as START"
    selstr += ",LOFAR_OBSERVATION_END as END"
    selstr += ",FLAG_ROW"
    sel = obstab.select(selstr)
    # Copy the subtable to the image and add it as a subtable.
    subtab = sel.copy(image.name() + "/" + "LOFAR_ORIGIN", deep=True)
    subtab = pt.table(image.name() + "/" + "LOFAR_ORIGIN",
                      readonly=False,
                      ack=False)
    obstab.close()
    image.putkeyword("ATTRGROUPS." + "LOFAR_ORIGIN", subtab)
    # Set the correct units of columns to update.
    subtab.putcolkeyword("CHANNEL_WIDTH", "QuantumUnits", ["Hz"])
    subtab.putcolkeyword("EXPOSURE", "QuantumUnits", ["s"])
    subtab.putcolkeyword("START", "MEASINFO", {"Ref": "UTC", "type": "epoch"})
    subtab.putcolkeyword("END", "MEASINFO", {"Ref": "UTC", "type": "epoch"})
    # Update the columns not in OBSERVATION table.
    # Get EXPOSURE from first row in main tables.
    # Get NUM_CHAN from SPECTRAL_WINDOW subtables.
    # Calculate CHANNEL_WIDTH (convert from MHz to Hz).
    # Get SUBBAND from MS name.
    for i in range(len(msNames)):
        t = pt.table(msNames[i], ack=False)
        subtab.putcell("EXPOSURE", i, t.getcell("EXPOSURE", 0))
        t1 = pt.table(t.getkeyword("SPECTRAL_WINDOW"), ack=False)
        numchan = t1.getcell("NUM_CHAN", 0)
        subtab.putcell("NUM_CHAN", i, numchan)
        freqs = t1.getcell("CHAN_FREQ", 0)
        fwidths = t1.getcell("CHAN_WIDTH", 0)
        sfreq = freqs[0] - 0.5 * fwidths[0]
        efreq = freqs[-1] + 0.5 * fwidths[-1]
        subtab.putcell("FREQUENCY_MIN", i, sfreq)
        subtab.putcell("FREQUENCY_MAX", i, efreq)
        subtab.putcell("FREQUENCY_CENTER", i, t1.getcell("REF_FREQUENCY", 0))
        subtab.putcell("CHANNEL_WIDTH", i, fwidths[0])
        # Determine the averaging factors.
        avgfreq = 1
        avgtime = 1
        if ("LOFAR_FULL_RES_FLAG" in t.colnames()):
            avgfreq = t.getcolkeyword("LOFAR_FULL_RES_FLAG", "NCHAN_AVG")
            avgtime = t.getcolkeyword("LOFAR_FULL_RES_FLAG", "NTIME_AVG")
        subtab.putcell("NCHAN_AVG", i, avgfreq)
        subtab.putcell("NTIME_AVG", i, avgtime)
        t1.close()
        # Determine nr of data points flagged
        t.close()
        subband = 0
        inx = msNames[i].find("SB")
        if inx >= 0:
            try:
                subband = int(msNames[i][inx + 2:inx + 5])
            except ValueError:
                pass
        subtab.putcell("SUBBAND", i, subband)
    # Ready
    subtab.close()
    sel.close()
    print "Added subtable LOFAR_ORIGIN containing", len(msNames), "rows"
Beispiel #43
0
    def __init__(self,
                 MSfiles,
                 factor_working_dir,
                 dirindparmdb,
                 skymodel_dirindep=None,
                 local_dir=None,
                 test_run=False):

        self.files = MSfiles
        self.msnames = [MS.split('/')[-1] for MS in self.files]
        self.working_dir = factor_working_dir
        self.dirindparmdbs = [
            os.path.join(MS, dirindparmdb) for MS in self.files
        ]
        self.skymodel_dirindep = skymodel_dirindep
        self.numMS = len(self.files)

        # Get the frequency info and set name
        sw = pt.table(self.files[0] + '::SPECTRAL_WINDOW', ack=False)
        self.freq = sw.col('REF_FREQUENCY')[0]
        self.nchan = sw.col('NUM_CHAN')[0]
        self.chan_freqs_hz = sw.col('CHAN_FREQ')[0]
        self.chan_width_hz = sw.col('CHAN_WIDTH')[0][0]
        sw.close()
        self.name = 'Band_{0:.2f}MHz'.format(self.freq / 1e6)
        self.log = logging.getLogger('factor:{}'.format(self.name))
        self.log.debug('Band name is {}'.format(self.name))
        self.chunks_dir = os.path.join(factor_working_dir, 'chunks', self.name)

        # Do some checks
        self.check_freqs()
        self.check_parmdb()

        # Get the field RA and Dec
        obs = pt.table(self.files[0] + '::FIELD', ack=False)
        self.ra = np.degrees(float(obs.col('REFERENCE_DIR')[0][0][0]))
        if self.ra < 0.:
            self.ra = 360.0 + (self.ra)
        self.dec = np.degrees(float(obs.col('REFERENCE_DIR')[0][0][1]))
        obs.close()

        # Get the station diameter
        ant = pt.table(self.files[0] + '::ANTENNA', ack=False)
        self.diam = float(ant.col('DISH_DIAMETER')[0])
        ant.close()

        # Find mean elevation and FOV
        for MS_id in xrange(self.numMS):
            # Add (virtual) elevation column to MS
            try:
                pt.addDerivedMSCal(self.files[MS_id])
            except RuntimeError:
                # RuntimeError indicates column already exists
                pass

            # Calculate mean elevation
            tab = pt.table(self.files[MS_id], ack=False)
            if MS_id == 0:
                global_el_values = tab.getcol('AZEL1', rowincr=10000)[:, 1]
            else:
                global_el_values = np.hstack(
                    (global_el_values, tab.getcol('AZEL1', rowincr=10000)[:,
                                                                          1]))
            tab.close()

            # Remove (virtual) elevation column from MS
            pt.removeDerivedMSCal(self.files[MS_id])
        self.mean_el_rad = np.mean(global_el_values)
        sec_el = 1.0 / np.sin(self.mean_el_rad)
        self.fwhm_deg = 1.1 * (
            (3.0e8 / self.freq) / self.diam) * 180. / np.pi * sec_el

        # Check for SUBTRACTED_DATA_ALL column in original datasets
        self.has_sub_data = True
        self.has_sub_data_new = False
        for MSid in xrange(self.numMS):
            tab = pt.table(self.files[MSid], ack=False)
            if not 'SUBTRACTED_DATA_ALL' in tab.colnames():
                self.log.error('SUBTRACTED_DATA_ALL column not found in file '
                               '{}'.format(self.files[MSid]))
                self.has_sub_data = False
            tab.close()
        if not self.has_sub_data:
            self.log.info('Exiting...')
            sys.exit(1)

        # cut input files into chunks if needed
        chunksize = 2400.  # in seconds -> 40min
        self.chunk_input_files(chunksize,
                               dirindparmdb,
                               local_dir=local_dir,
                               test_run=test_run)
        if len(self.files) == 0:
            self.log.error(
                'No data left after checking input files for band: {}. '
                'Probably too little unflagged data.'.format(self.name))
            self.log.info('Exiting!')
            sys.exit(1)

        # Calculate times and number of samples
        self.sumsamples = 0
        self.minSamplesPerFile = 4294967295  # If LOFAR lasts that many seconds then I buy you a beer.
        self.starttime = np.finfo('d').max
        self.endtime = 0.
        for MSid in xrange(self.numMS):
            tab = pt.table(self.files[MSid], ack=False)
            self.starttime = min(self.starttime, np.min(tab.getcol('TIME')))
            self.endtime = max(self.endtime, np.max(tab.getcol('TIME')))
            for t2 in tab.iter(["ANTENNA1", "ANTENNA2"]):
                if (t2.getcell('ANTENNA1', 0)) < (t2.getcell('ANTENNA2', 0)):
                    self.timepersample = t2.col('TIME')[1] - t2.col('TIME')[0]
                    numsamples = t2.nrows()
                    self.sumsamples += numsamples
                    self.minSamplesPerFile = min(self.minSamplesPerFile,
                                                 numsamples)
                    break
            tab.close()

        self.log.debug("Using {0} files.".format(len(self.files)))
        if skymodel_dirindep != None:
            self.log.debug("Using Skymodel: {}".format(
                os.path.basename(skymodel_dirindep)))
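# Worked example (mine, not from the original): the primary-beam FWHM estimate
# used above, for an assumed 150 MHz observation with a 30.75-m station
# observed at a mean elevation of 50 degrees.
def example_fwhm_deg():
    freq = 150e6                      # Hz (assumed)
    diam = 30.75                      # m (assumed station diameter)
    mean_el_rad = np.radians(50.)     # assumed mean elevation
    sec_el = 1.0 / np.sin(mean_el_rad)
    fwhm_deg = 1.1 * ((3.0e8 / freq) / diam) * 180. / np.pi * sec_el
    print 'FWHM ~ {0:.1f} deg'.format(fwhm_deg)  # ~5.4 deg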
Beispiel #44
0
# TO DO, need to fix input datasets with more than 1 freq. channels per MS

import numpy
import pyrap.tables as pt
import sys
import scipy.signal

msname         = str(sys.argv[1])
ionnormfactor  = float(sys.argv[2])
ionscalefactor = float(sys.argv[3])
nblockconcat   = int(sys.argv[4])
colname        = str(sys.argv[5])
chanperblock   = int(sys.argv[6])

t = pt.table(msname, readonly=False)


freq_tab   = pt.table(msname + '/SPECTRAL_WINDOW')
freq       = freq_tab.getcol('REF_FREQUENCY')
chanfreq   = freq_tab.getcol('CHAN_FREQ')[0]
wav        = 3e8/freq
anttab     = pt.table(msname + '/ANTENNA')
antlist    = anttab.getcol('NAME')

centerfreq = numpy.mean(chanfreq)
freq_res   = numpy.abs(chanfreq[0]-chanfreq[1])



for t2 in t.iter(["ANTENNA1","ANTENNA2"]):     
Beispiel #45
0
    def chunk_input_files(self,
                          chunksize,
                          dirindparmdb,
                          local_dir=None,
                          test_run=False,
                          min_fraction=0.5):
        """
        Make copies of input files that are smaller than 2*chunksize

        Chops off chunk of chunksize length until remainder is smaller than 2*chunksize
        Generates new self.files, self.msnames, and self.dirindparmdbs
        The direction independent parmDBs are fully copied into the new MSs

        Parameters
        ----------
        chunksize : float
            length of a chunk in seconds
        dirindparmdb : str
            Name of direction-independent instrument parmdb inside the new chunk files
        local_dir : str
            Path to local scratch directory for temp output. The file is then
            copied to the original output directory
        test_run : bool, optional
            If True, don't actually do the chopping.
        min_fraction : float, optional
            Minimum fraction of unflagged data in a time-chunk needed for the chunk
            to be kept. Only used when chunking large files. (default = 0.5)
        """
        newfiles = []
        newdirindparmdbs = []
        for MS_id in xrange(self.numMS):
            nchunks = 1
            tab = pt.table(self.files[MS_id], ack=False)

            # Make filter for data columns that we don't need. These include imaging
            # columns and those made during initial subtraction
            colnames = tab.colnames()
            colnames_to_remove = [
                'MODEL_DATA', 'CORRECTED_DATA', 'IMAGING_WEIGHT',
                'SUBTRACTED_DATA_HIGH', 'SUBTRACTED_DATA_ALL_NEW',
                'SUBTRACTED_DATA', 'LOFAR_FULL_RES_FLAG'
            ]
            colnames_to_keep = [
                c for c in colnames if c not in colnames_to_remove
            ]

            timepersample = tab.getcell('EXPOSURE', 0)
            timetab = tab.sort('unique desc TIME')
            tab.close()
            timearray = timetab.getcol('TIME')
            timetab.close()
            numsamples = len(timearray)
            mystarttime = np.min(timearray)
            myendtime = np.max(timearray)
            assert (timepersample *
                    (numsamples - 1) + .5) > (myendtime - mystarttime)
            if (myendtime - mystarttime) > (2. * chunksize):
                nchunks = int((numsamples * timepersample) / chunksize)
            if test_run:
                self.log.debug(
                    'Would split (or not) {0} into {1} chunks. '.format(
                        self.files[MS_id], nchunks))
                # tab was already closed above; no second close needed
                continue

            # Define directory where chunks are stored
            newdirname = self.chunks_dir
            if not os.path.exists(newdirname):
                os.mkdir(newdirname)

            if nchunks > 1:
                self.log.debug('Splitting {0} into {1} chunks...'.format(
                    self.files[MS_id], nchunks))

                pool = multiprocessing.Pool()
                results = pool.map(
                    process_chunk_star,
                    itertools.izip(itertools.repeat(self.files[MS_id]),
                                   itertools.repeat(self.dirindparmdbs[MS_id]),
                                   range(nchunks), itertools.repeat(nchunks),
                                   itertools.repeat(mystarttime),
                                   itertools.repeat(myendtime),
                                   itertools.repeat(chunksize),
                                   itertools.repeat(dirindparmdb),
                                   itertools.repeat(colnames_to_keep),
                                   itertools.repeat(newdirname),
                                   itertools.repeat(local_dir),
                                   itertools.repeat(min_fraction)))
                pool.close()
                pool.join()

                for chunk_file, chunk_parmdb in results:
                    if bool(chunk_file) and bool(chunk_parmdb):
                        newfiles.append(chunk_file)
                        newdirindparmdbs.append(chunk_parmdb)
            else:
                # Make symlinks for the files
                chunk_name = '{0}_chunk0.ms'.format(
                    os.path.splitext(os.path.basename(self.files[MS_id]))[0])
                chunk_file = os.path.join(newdirname, chunk_name)
                newdirindparmdb = os.path.join(chunk_file, dirindparmdb)

                if not os.path.exists(chunk_file):
                    # It's a "new" file, check that the chunk has at least min_fraction
                    # unflagged data. If not, then continue with the for loop over MSs
                    # This will re-run for bad files every time factor is started, but the
                    # user could just remove the file from the input directory.
                    if find_unflagged_fraction(
                            self.files[MS_id]) < min_fraction:
                        self.log.debug(
                            'File {} not used because it contains too little unflagged'
                            ' data'.format(os.path.basename(
                                self.files[MS_id])))
                        continue
                    os.symlink(self.files[MS_id], chunk_file)

                if not os.path.exists(newdirindparmdb):
                    os.symlink(self.dirindparmdbs[MS_id], newdirindparmdb)

                newfiles.append(chunk_file)
                newdirindparmdbs.append(newdirindparmdb)

        # Check that each file has at least min_fraction unflagged data. If not, remove
        # it from the file list.
        # This may become an option, so I kept the code for the time being. AH 14.3.2016
        check_all_unflagged = False
        if check_all_unflagged:
            for f, p in zip(newfiles[:], newdirindparmdbs[:]):
                if self.find_unflagged_fraction(f) < min_fraction:
                    newfiles.remove(f)
                    newdirindparmdbs.remove(p)
                    self.log.debug('Skipping file {0} in further processing '
                                   '(unflagged fraction < {1}%)'.format(
                                       f, min_fraction * 100.0))

        if test_run:
            return
        self.files = newfiles
        self.msnames = [os.path.basename(MS) for MS in self.files]
        self.dirindparmdbs = newdirindparmdbs
        self.numMS = len(self.files)
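# Worked example (mine, not from the original): the chunking rule used above.
# With chunksize = 2400 s, a 9000 s observation exceeds 2*chunksize and is cut
# into int(9000/2400) = 3 chunks; a 4000 s observation (< 2*chunksize) is kept
# whole and only symlinked as a single "chunk0" file.
def example_nchunks(duration, chunksize=2400.):
    return int(duration / chunksize) if duration > 2. * chunksize else 1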
Beispiel #46
0
def parmDBs2h5parm(h5parmName,
                   parmDBs,
                   antennaFile,
                   fieldFile,
                   skydbFile=None,
                   compression=5,
                   solsetName=None):
    """
    Write the contents of a list of parmDBs into a losoto-style hdf5-file
    h5parmName   - name (path) of the hdf5 file to generate
    parmDBs      - list with the names of the parmDBs
    antennaFile  - name (path) of an ANTENNA table of the observation
    fieldFile    - name (path) of a FIELD table of the observation
    skydbFile    - name (path) of a skydb table of the calibration run (Needed for direction dependent parameters)
    compression  - compression level for the hdf5-file (0 - none ; 9 - highest)
    solsetName   - name of the solset to generate (default: "sol000")
    """

    # open/create the h5parm file and the solution-set
    h5parmDB = h5parm(h5parmName, readonly=False, complevel=compression)
    solset = h5parmDB.makeSolset(solsetName)

    #open the first instrument table, so that we know where to look for names and stuff
    firstInst = pdb.parmdb(parmDBs[0])

    # get unique list of solution types
    solTypes = list(set(x1.split(":")[0] for x1 in firstInst.getNames()))

    # rewrite solTypes in order to put together
    # Gain <-> DirectionalGain
    # CommonRotationAngle <-> RotationAngle
    # CommonScalarPhase <-> ScalarPhase
    # it also separate Real/Imag/Ampl/Phase into different solTypes
    if "Gain" in solTypes:
        solTypes.remove('Gain')
        solTypes.append('*Gain:*:Real')
        solTypes.append('*Gain:*:Imag')
        solTypes.append('*Gain:*:Ampl')
        solTypes.append('*Gain:*:Phase')
    if "DirectionalGain" in solTypes:
        solTypes.remove('DirectionalGain')
        solTypes.append('*Gain:*:Real')
        solTypes.append('*Gain:*:Imag')
        solTypes.append('*Gain:*:Ampl')
        solTypes.append('*Gain:*:Phase')
    if "RotationAngle" in solTypes:
        solTypes.remove('RotationAngle')
        solTypes.append('*RotationAngle')
    if "CommonRotationAngle" in solTypes:
        solTypes.remove('CommonRotationAngle')
        solTypes.append('*RotationAngle')
    if "RotationMeasure" in solTypes:
        solTypes.remove('RotationMeasure')
        solTypes.append('*RotationMeasure')
    if "ScalarPhase" in solTypes:
        solTypes.remove('ScalarPhase')
        solTypes.append('*ScalarPhase')
    if "CommonScalarPhase" in solTypes:
        solTypes.remove('CommonScalarPhase')
        solTypes.append('*ScalarPhase')
    if "CommonScalarAmplitude" in solTypes:
        solTypes.remove('CommonScalarAmplitude')
        solTypes.append('*ScalarAmplitude')
    # and remove duplicate entries
    solTypes = list(set(solTypes))

    for solType in solTypes:
        if len(firstInst.getNames(solType + ':*')) == 0: continue
        pols = set()
        dirs = set()
        ants = set()
        freqs = set()
        times = set()
        ptype = set()

        for pDBname in parmDBs:
            instrumentdb = pdb.parmdb(pDBname)
            # create the axes grid, necessary if not all entries have the same axes length
            data = instrumentdb.getValuesGrid(solType + ':*')
            for solEntry in data:
                pol, dir, ant, parm = parmdbToAxes(solEntry)
                if pol != None: pols |= set([pol])
                if dir != None: dirs |= set([dir])
                if ant != None: ants |= set([ant])
                freqs |= set(data[solEntry]['freqs'])
                times |= set(data[solEntry]['times'])
            #close the parmDB
            instrumentdb = 0

        pols = np.sort(list(pols))
        dirs = np.sort(list(dirs))
        ants = np.sort(list(ants))
        freqs = np.sort(list(freqs))
        times = np.sort(list(times))
        shape = [
            i
            for i in (len(pols), len(dirs), len(ants), len(freqs), len(times))
            if i != 0
        ]
        vals = np.empty(shape)
        vals[:] = np.nan
        weights = np.zeros(shape)

        for pDBname in parmDBs:
            instrumentdb = pdb.parmdb(pDBname)
            # fill the values
            data = instrumentdb.getValuesGrid(solType + ':*')
            if 'Real' in solType:
                dataIm = instrumentdb.getValuesGrid(
                    solType.replace('Real', 'Imag') + ':*')
            if 'Imag' in solType:
                dataRe = instrumentdb.getValuesGrid(
                    solType.replace('Imag', 'Real') + ':*')
            for solEntry in data:
                pol, dir, ant, parm = parmdbToAxes(solEntry)
                ptype |= set([solEntry.split(':')[0]
                              ])  # original parmdb solution type
                freq = data[solEntry]['freqs']
                time = data[solEntry]['times']
                val = data[solEntry]['values']
                # convert Real and Imag in Amp and Phase respectively
                if parm == 'Real':
                    solEntryIm = solEntry.replace('Real', 'Imag')
                    valI = dataIm[solEntryIm]['values']
                    val = np.sqrt((val**2) + (valI**2))
                if parm == 'Imag':
                    solEntryRe = solEntry.replace('Imag', 'Real')
                    valR = dataRe[solEntryRe]['values']
                    val = np.arctan2(val, valR)

                coords = []
                if pol != None:
                    polCoord = np.searchsorted(pols, pol)
                    coords.append(polCoord)
                if dir != None:
                    dirCoord = np.searchsorted(dirs, dir)
                    coords.append(dirCoord)
                if ant != None:
                    antCoord = np.searchsorted(ants, ant)
                    coords.append(antCoord)
                freqCoord = np.searchsorted(freqs, freq)
                timeCoord = np.searchsorted(times, time)
                vals[tuple(coords)][np.ix_(freqCoord, timeCoord)] = val.T
                weights[tuple(coords)][np.ix_(freqCoord, timeCoord)] = 1
            #close the parmDB
            instrumentdb = 0

        vals = np.nan_to_num(vals)  # replace nans with 0 (flagged later)

        if solType == '*RotationAngle':
            np.putmask(weights, vals == 0., 0)  # flag where val=0
            h5parmDB.makeSoltab(solset, 'rotation', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*RotationMeasure':
            np.putmask(weights, vals == 0., 0)  # flag where val=0
            h5parmDB.makeSoltab(solset, 'rotationmeasure', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*ScalarPhase':
            np.putmask(weights, vals == 0., 0)
            h5parmDB.makeSoltab(solset, 'scalarphase', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*ScalarAmplitude':
            np.putmask(weights, vals == 0., 0)
            h5parmDB.makeSoltab(solset, 'scalaramplitude', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'Clock':
            np.putmask(weights, vals == 0., 0)
            # clock may be diag or scalar
            if len(pols) == 0:
                h5parmDB.makeSoltab(solset, 'clock', axesNames=['ant','freq','time'], \
                    axesVals=[ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
            else:
                h5parmDB.makeSoltab(solset, 'clock', axesNames=['pol','ant','freq','time'], \
                    axesVals=[pols,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'TEC':
            np.putmask(weights, vals == 0., 0)
            # tec may be diag or scalar
            if len(pols) == 0:
                h5parmDB.makeSoltab(solset, 'tec', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
            else:
                h5parmDB.makeSoltab(solset, 'tec', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Real' or solType == '*Gain:*:Ampl':
            np.putmask(
                vals, vals == 0.,
                1)  # nans end up into 1s (as BBS output, flagged next line)
            np.putmask(weights, vals == 1., 0)  # flag where val=1
            h5parmDB.makeSoltab(solset, 'amplitude', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Imag' or solType == '*Gain:*:Phase':
            np.putmask(weights, vals == 0., 0)  # flag where val=0
            h5parmDB.makeSoltab(solset, 'phase', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))

    antennaTable = pt.table(antennaFile, ack=False)
    antennaNames = antennaTable.getcol('NAME')
    antennaPositions = antennaTable.getcol('POSITION')
    antennaTable.close()
    antennaTable = solset._f_get_child('antenna')
    antennaTable.append(zip(*(antennaNames, antennaPositions)))

    fieldTable = pt.table(fieldFile, ack=False)
    phaseDir = fieldTable.getcol('PHASE_DIR')
    pointing = phaseDir[0, 0, :]
    fieldTable.close()

    sourceTable = solset._f_get_child('source')
    # add the field centre, that is also the direction for Gain and CommonRotationAngle
    sourceTable.append([('pointing', pointing)])

    dirs = []
    for tab in solset._v_children:
        c = solset._f_get_child(tab)
        if c._v_name != 'antenna' and c._v_name != 'source':
            dirs.extend(list(set(c.dir)))
    # remove duplicates
    dirs = list(set(dirs))
    # remove any pointing (already in the table)
    if 'pointing' in dirs:
        dirs.remove('pointing')

    if dirs != []:
        if skydbFile == None:
            logging.critical(
                'No sky table given, but direction dependent parameters in parmDB. Exiting!'
            )
            sys.exit(1)
        sourceFile = skydbFile + '/SOURCES'
        src_table = pt.table(sourceFile, ack=False)
        sub_tables = src_table.getsubtables()
        vals = []
        ra = dec = np.nan
        has_patches_subtable = False
        for sub_table in sub_tables:
            if 'PATCHES' in sub_table:
                has_patches_subtable = True
        if has_patches_subtable:
            # Read values from PATCHES subtable
            src_table.close()
            sourceFile = skydbFile + '/SOURCES/PATCHES'
            src_table = pt.table(sourceFile, ack=False)
            patch_names = src_table.getcol('PATCHNAME')
            patch_ras = src_table.getcol('RA')
            patch_decs = src_table.getcol('DEC')
            for source in dirs:
                try:
                    patch_indx = patch_names.index(source)
                    ra = patch_ras[patch_indx]
                    dec = patch_decs[patch_indx]
                except ValueError:
                    ra = np.nan
                    dec = np.nan
                vals.append([ra, dec])
            src_table.close()
        else:
            # Try to read default values from parmdb instead
            skydb = pdb.parmdb(skydbFile)
            vals = []
            ra = dec = np.nan

            for source in dirs:
                try:
                    ra = skydb.getDefValues('Ra:' + source)['Ra:' +
                                                            source][0][0]
                    dec = skydb.getDefValues('Dec:' + source)['Dec:' +
                                                              source][0][0]
                except KeyError:
                    # Source not found in skymodel parmdb, try to find components
                    ra = np.array(
                        skydb.getDefValues('Ra:*' + source + '*').values())
                    dec = np.array(
                        skydb.getDefValues('Dec:*' + source + '*').values())
                    if len(ra) == 0 or len(dec) == 0:
                        ra = np.nan
                        dec = np.nan
                    else:
                        ra = ra.mean()
                        dec = dec.mean()
                vals.append([ra, dec])
        sourceTable.append(zip(*(dirs, vals)))

    solsetname = solset._v_name
    # close the hdf5-file
    h5parmDB.close()
    return solsetname
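# Usage sketch (the paths below are hypothetical; only the function above is
# from the original source):
def example_conversion(msname):
    return parmDBs2h5parm(msname + '_sol.h5',
                          [msname + '/instrument'],
                          msname + '/ANTENNA',
                          msname + '/FIELD',
                          compression=5)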
Beispiel #47
0
def getfreq(ms):
    t = pt.table(ms + '/SPECTRAL_WINDOW', readonly=True, ack=False)
    freq = t[0]['REF_FREQUENCY']
    t.close()
    return freq
def main(ms_input,
         filename=None,
         mapfile_dir=None,
         numSB=-1,
         hosts=None,
         NDPPPfill=True,
         target_path=None,
         stepname=None,
         mergeLastGroup=False,
         truncateLastSBs=True,
         firstSB=None):
    """
    Check a list of MS files for missing frequencies

    Parameters
    ----------
    ms_input : list or str
        List of MS filenames, or string with list, or path to a mapfile
    filename: str
        Name of output mapfile
    mapfile_dir : str
        Directory for output mapfile
    numSB : int, optional 
        How many files should go into one frequency group. Values <= 0 mean put 
        all files of the same time-step into one group.
        default = -1
    hosts : list or str
        List of hostnames or string with list of hostnames
    NDPPPfill : bool, optional
        Add dummy file-names for missing frequencies, so that NDPPP can
        fill the data with flagged dummy data.
        default = True
    target_path : str, optional
        Change the path of the "groups" files to this. (I.e. write output files 
        into this directory with the subsequent NDPPP call.)
        default = keep path of input files
    stepname : str, optional
        Add this step-name into the file-names of the output files.
    mergeLastGroup, truncateLastSBs : bool, optional
        mergeLastGroup = True, truncateLastSBs = True:
          not allowed
        mergeLastGroup = True, truncateLastSBs = False:
          put the files from the last group that doesn't have SBperGroup subbands 
          into the second last group (which will then have more than SBperGroup entries). 
        mergeLastGroup = False, truncateLastSBs = True:
          ignore last files, that don't make for a full group (not all files are used).
        mergeLastGroup = False, truncateLastSBs = False:
          keep incomplete last group, or - with NDPPPfill=True - fill
          last group with dummies.      
    firstSB : int, optional
        If set, then reference the grouping of files to this station-subband. As if a file 
        with this station-subband would be included in the input files.
        (For HBA-low, i.e. 0 -> 100MHz, 55 -> 110.74MHz, 512 -> 200MHz)

    Returns
    -------
    result : dict
        Dict with the name of the generated mapfile

    """

    NDPPPfill = input2bool(NDPPPfill)
    mergeLastGroup = input2bool(mergeLastGroup)
    truncateLastSBs = input2bool(truncateLastSBs)
    firstSB = input2int(firstSB)
    numSB = int(numSB)

    if not filename or not mapfile_dir:
        raise ValueError(
            'sort_times_into_freqGroups: filename and mapfile_dir are needed!')
    if mergeLastGroup and truncateLastSBs:
        raise ValueError(
            'sort_times_into_freqGroups: Can either merge the last partial group or truncate at last full group, not both!'
        )


#    if mergeLastGroup:
#        raise ValueError('sort_times_into_freqGroups: mergeLastGroup is not (yet) implemented!')
    if type(ms_input) is str:
        if ms_input.startswith('[') and ms_input.endswith(']'):
            ms_list = [
                f.strip(' \'\"') for f in ms_input.strip('[]').split(',')
            ]
        else:
            map_in = DataMap.load(ms_input)
            map_in.iterator = DataMap.SkipIterator
            ms_list = []
            for fname in map_in:
                if fname.startswith('[') and fname.endswith(']'):
                    for f in fname.strip('[]').split(','):
                        ms_list.append(f.strip(' \'\"'))
                else:
                    ms_list.append(fname.strip(' \'\"'))
    elif type(ms_input) is list:
        ms_list = [str(f).strip(' \'\"') for f in ms_input]
    else:
        raise TypeError(
            'sort_times_into_freqGroups: type of "ms_input" unknown!')

    if type(hosts) is str:
        hosts = [h.strip(' \'\"') for h in hosts.strip('[]').split(',')]
    if not hosts:
        hosts = ['localhost']
    numhosts = len(hosts)
    print "sort_times_into_freqGroups: Working on", len(
        ms_list), "files (including flagged files)."

    time_groups = {}
    # sort by time
    for i, ms in enumerate(ms_list):
        # work only on files selected by a previous step
        if ms.lower() != 'none':
            # use the slower but more reliable way:
            obstable = pt.table(ms, ack=False)
            timestamp = int(round(np.min(obstable.getcol('TIME'))))
            #obstable = pt.table(ms+'::OBSERVATION', ack=False)
            #timestamp = int(round(obstable.col('TIME_RANGE')[0][0]))
            obstable.close()
            if timestamp in time_groups:
                time_groups[timestamp]['files'].append(ms)
            else:
                time_groups[timestamp] = {
                    'files': [ms],
                    'basename': os.path.splitext(ms)[0]
                }
    print "sort_times_into_freqGroups: found", len(time_groups), "time-groups"

    # sort time-groups by frequency
    timestamps = time_groups.keys()
    timestamps.sort()  # not needed now, but later
    first = True
    nchans = 0
    for time in timestamps:
        freqs = []
        for ms in time_groups[time]['files']:
            # Get the frequency info
            sw = pt.table(ms + '::SPECTRAL_WINDOW', ack=False)
            freq = sw.col('REF_FREQUENCY')[0]
            if first:
                file_bandwidth = sw.col('TOTAL_BANDWIDTH')[0]
                nchans = sw.col('CHAN_WIDTH')[0].shape[0]
                chwidth = sw.col('CHAN_WIDTH')[0][0]
                freqset = set([freq])
                first = False
            else:
                assert file_bandwidth == sw.col('TOTAL_BANDWIDTH')[0]
                assert nchans == sw.col('CHAN_WIDTH')[0].shape[0]
                assert chwidth == sw.col('CHAN_WIDTH')[0][0]
                freqset.add(freq)
            freqs.append(freq)
            sw.close()
        time_groups[time]['freq_names'] = zip(freqs,
                                              time_groups[time]['files'])
        time_groups[time]['freq_names'].sort(key=lambda pair: pair[0])
        #time_groups[time]['files'] = [name for (freq,name) in freq_names]
        #time_groups[time]['freqs'] = [freq for (freq,name) in freq_names]
    print "sort_times_into_freqGroups: Collected the frequencies for the time-groups"

    freqliste = np.array(list(freqset))
    freqliste.sort()
    freq_width = np.min(freqliste[1:] - freqliste[:-1])
    if file_bandwidth > freq_width:
        raise ValueError(
            "Bandwidth of files is larger than minimum frequency step between two files!"
        )
    if file_bandwidth < (freq_width / 2.):
        raise ValueError(
            "Bandwidth of files is smaller than half the minimum frequency step between two files! (More than half the data is missing.)"
        )
    #the new output map
    filemap = MultiDataMap()
    groupmap = DataMap()
    maxfreq = np.max(freqliste) + freq_width / 2.
    if firstSB != None:
        minfreq = (float(firstSB) / 512. * 100e6) + 100e6 - freq_width / 2.
        if np.min(freqliste) < minfreq:
            raise ValueError(
                'sort_times_into_freqGroups: Frequency of lowest input data is lower than reference frequency!'
            )
    else:
        minfreq = np.min(freqliste) - freq_width / 2.
    groupBW = freq_width * numSB
    if groupBW < 1e6:
        print 'sort_times_into_freqGroups: ***WARNING***: Bandwidth of concatenated MS is lower than 1 MHz. This may cause conflicts with the concatenated file names!'
    freqborders = np.arange(minfreq, maxfreq, groupBW)
    if mergeLastGroup:
        freqborders[-1] = maxfreq
    elif truncateLastSBs:
        pass  #nothing to do! # left to make the logic more clear!
    elif not truncateLastSBs and NDPPPfill:
        freqborders = np.append(freqborders, (freqborders[-1] + groupBW))
    elif not truncateLastSBs and not NDPPPfill:
        freqborders = np.append(freqborders, maxfreq)

    freqborders = freqborders[freqborders > (np.min(freqliste) - groupBW)]
    ngroups = len(freqborders) - 1
    if ngroups == 0:
        raise ValueError(
            'sort_times_into_freqGroups: Not enough input subbands to create at least one full (frequency-)group!'
        )

    print "sort_times_into_freqGroups: Will create", ngroups, "group(s) with", numSB, "file(s) each."

    hostID = 0
    for time in timestamps:
        (freq, fname) = time_groups[time]['freq_names'].pop(0)
        for groupIdx in xrange(ngroups):
            files = []
            skip_this = True
            filefreqs_low = np.arange(freqborders[groupIdx],
                                      freqborders[groupIdx + 1], freq_width)
            for lower_freq in filefreqs_low:
                if freq > lower_freq and freq < lower_freq + freq_width:
                    assert freq != 1e12
                    files.append(fname)
                    if len(time_groups[time]['freq_names']) > 0:
                        (freq, fname) = time_groups[time]['freq_names'].pop(0)
                    else:
                        (freq, fname) = (1e12, 'This_shouldn\'t_show_up')
                    skip_this = False
                elif NDPPPfill:
                    files.append('dummy.ms')
            if not skip_this:
                filemap.append(
                    MultiDataProduct(hosts[hostID % numhosts], files,
                                     skip_this))
                freqID = int(
                    (freqborders[groupIdx] + freqborders[groupIdx + 1]) / 2e6)
                groupname = time_groups[time]['basename'] + '_%Xt_%dMHz.ms' % (
                    time, freqID)
                if type(stepname) is str:
                    groupname += stepname
                if type(target_path) is str:
                    groupname = os.path.join(target_path,
                                             os.path.basename(groupname))
                groupmap.append(
                    DataProduct(hosts[hostID % numhosts], groupname,
                                skip_this))
        orphan_files = len(time_groups[time]['freq_names'])
        if freq < 1e12:
            orphan_files += 1
        if orphan_files > 0:
            print "sort_times_into_freqGroups: Had %d unassigned files in time-group %xt." % (
                orphan_files, time)
    filemapname = os.path.join(mapfile_dir, filename)
    filemap.save(filemapname)
    groupmapname = os.path.join(mapfile_dir, filename + '_groups')
    groupmap.save(groupmapname)
    # generate map with edge-channels to flag
    flagmap = _calc_edge_chans(filemap, nchans)
    flagmapname = os.path.join(mapfile_dir, filename + '_flags')
    flagmap.save(flagmapname)
    result = {
        'mapfile': filemapname,
        'groupmapfile': groupmapname,
        'flagmapfile': flagmapname
    }
    return result
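The returned dict points at three mapfiles on disk. A hypothetical consumer, assuming the LOFAR pipeline framework's DataMap.load / MultiDataMap.load classmethods (normally the surrounding pipeline does this for you):

# 'result' is the dict returned by sort_times_into_freqGroups() above
filemap = MultiDataMap.load(result['mapfile'])      # per-group lists of input MS files
groupmap = DataMap.load(result['groupmapfile'])     # one concatenated MS name per group
flagmap = MultiDataMap.load(result['flagmapfile'])  # edge channels to flag per group
for group, out in zip(filemap, groupmap):
    print out.host, out.file, group.file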
Beispiel #49
0
import sys

import numpy as np
import pyrap.tables as pt

args=len(sys.argv)
if (args==1):
    print "usage: clip.py [MS name] [column] [global?]";
    sys.exit(1)

doglobal=0
col="DATA"
ms = sys.argv[1]
if (args>2):
    col = sys.argv[2]
if (args>3):
    doglobal = (sys.argv[3]=="G")

t = pt.table(ms, readonly=False, ack=False)
data = t.getcol(col)
mask = t.getcol('FLAG')
print np.sum(mask),'flags are set'
data = np.ma.array(data, dtype=None, mask=mask)
data[np.isnan(data)]=np.ma.masked

ntime, nchan, npol = data.shape
print "Number of channels is",nchan
if (doglobal):
    OXXmed=np.ma.median(abs(data[:,:,0]))
    OYYmed=np.ma.median(abs(data[:,:,3]))  # YY is the last correlation (index 3)
    print "Overall XX and YY medians are",OXXmed,OYYmed
for chan in xrange(nchan):
        IampXX = abs(data[:,chan,0])
        IampYY = abs(data[:,chan,3])
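The snippet breaks off inside the per-channel loop. A minimal sketch of how the clipping step presumably continues, assuming a 5x-median amplitude cut and that the updated mask is written back to the FLAG column (both assumptions, not from the original source):

        # Hypothetical continuation: flag samples whose XX or YY amplitude
        # exceeds 5x the chosen (global or per-channel) median
        XXmed = OXXmed if doglobal else np.ma.median(IampXX)
        YYmed = OYYmed if doglobal else np.ma.median(IampYY)
        bad = (IampXX > 5.0 * XXmed) | (IampYY > 5.0 * YYmed)
        mask[:, chan, :] |= bad.filled(False)[:, np.newaxis]

print np.sum(mask), 'flags are set after clipping'
t.putcol('FLAG', mask)
t.close()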
Beispiel #50
0
    def _get_selfcal_parameters(self, measurement_set, parset, major_cycle,
                                nr_cycles):
        """
      0. Modify the number of cycles to have a final step at the same
      resolution as the previous last cycle.
      1. Determine the target coordinates, especially the declination:
      for low dec (<35 deg), UVmin = 0.1 to exclude very short baselines.
      2. Determine the frequency and the wavelength.
      3. Determine the longest baseline and the best available resolution.
      4. Estimate all imaging parameters.
      5. Calculate the number of projection planes.
      6. Pixelsize must be a string: number + 'arcsec'.

      # Nicolas Vilchez, 2014
      # [email protected]
      """

        # ********************************************************************
        # 0. modify the number of cycles to have a final step at the same
        # resolution as the previous last cycle

        if major_cycle < nr_cycles - 1:
            nr_cycles = nr_cycles - 1

        scaling_factor = float(major_cycle) / float(nr_cycles - 1)

        # ********************************************************************
        #1. Determine Target coordinates for UVmin
        tabtarget = pt.table(measurement_set)
        tabfield = pt.table(tabtarget.getkeyword('FIELD'))
        coords = tabfield.getcell('REFERENCE_DIR', 0)
        target = coords[0] * 180.0 / math.pi  # convert from radians to degrees

        UVmin = 0
        if target[1] <= 35:  # low dec: exclude very short baselines (see docstring)
            UVmin = 0.1

        ra_target = target[0] + 360.0  # Why
        dec_target = target[1]

        # ********************************************************************
        # 2. Determine the frequency and the wavelength
        tabfreq = pt.table(measurement_set)
        table_spectral_window = pt.table(tabfreq.getkeyword("SPECTRAL_WINDOW"))
        frequency = table_spectral_window.getcell('REF_FREQUENCY', 0)

        wavelenght = 3.0E8 / frequency  # lambda = c / nu

        # ********************************************************************
        # 3. Determine the longest baseline and the best available resolution

        tabbaseline = pt.table(measurement_set, readonly=False, ack=True)
        posbaseline = tabbaseline.getcol('UVW')
        maxBaseline = max(posbaseline[:, 0]**2 + posbaseline[:, 1]**2)**0.5

        bestBeamresol = round(
            (wavelenght / maxBaseline) * (180.0 / math.pi) * 3600.0, 0)

        # Beam resolution limitation to 10arcsec to avoid too large images
        if bestBeamresol < 10.0:
            bestBeamresol = 10.0

        # ********************************************************************
        # 4. Estimate all imaging parameters

        # estimate fov
        # fov = 5 degrees, except for high HBA observations => 1.5 degrees
        if frequency > 1.9E8:
            fov = 1.5
        else:
            fov = 5.0

        # we need 4 pixels per beam to have enough sampling
        pixPerBeam = 4.0

        # best resolution pixel size (i.e final pixel size for selfcal)
        bestPixelResol = round(bestBeamresol / pixPerBeam, 2)

        # factor to estimate the starting resolution (9 times in this case)
        badResolFactor = 9

        pixsize = round((badResolFactor * bestPixelResol) -
                        (badResolFactor * bestPixelResol - bestPixelResol) *
                        scaling_factor, 3)

        # number of pixel must be a multiple of 2 !!
        nbpixel = int(fov * 3600.0 / pixsize)
        if nbpixel % 2 == 1:
            nbpixel = nbpixel + 1

        robust = 0  #round(1.0 - (3.0 * scaling_factor), 2)

        UVmax = round(
            (wavelenght) / (pixPerBeam * pixsize / 3600.0 * math.pi / 180.0) /
            (1E3 * wavelenght), 3)

        wmax = round(UVmax * (wavelenght) * 1E3, 3)

        # ********************************************************************
        # 5. Calculate number of projection planes
        # Need to compute station diameter (the fov is fixed to 5 degree)
        # using wouter's function, to compute the w_proj_planes
        #    fov and diameter depending on the antenna name
        fov_from_ms, station_diameter = self._get_fov_and_station_diameter(
            measurement_set)

        w_proj_planes = min(
            257, math.floor(
                (maxBaseline * wavelenght) / (station_diameter**2)))
        w_proj_planes = int(round(w_proj_planes))

        # Maximum number of proj planes set to 1024: contact George Heald or
        # Ger van Diepen if this exception occurs
        maxsupport = max(1024, nbpixel)
        if w_proj_planes > maxsupport:
            raise Exception(
                "The number of projection planes for the current "
                "measurement set is too large.")

        # Warnings on pixel size
        if nbpixel < 256:
            self.logger.warn(
                "Using an image size smaller than 256x256: this "
                "leads to problematic imaging in some instances!")

        # ********************************************************************
        # 6. Pixelsize must be a string number : number +arcsec
        #    conversion at this step
        pixsize = str(pixsize) + 'arcsec'

        # ********************************************************************
        # 7. Threshold determination from the previous cycle
        if major_cycle == 0:
            threshold = '0.075Jy'
        else:
            fits_image_path_list = measurement_set.split('concat.ms')
            fits_image_path   = fits_image_path_list[0] +\
                    'awimage_cycle_%s/image.fits'%(major_cycle-1)

            # open a FITS file
            fitsImage = pyfits.open(fits_image_path)
            scidata = fitsImage[0].data

            # Sort all image pixels by value; flattening with ravel() replaces
            # the original slow double loop over the (square) image axes
            sortedData = sorted(scidata[0, 0].ravel())

            # Percent of faintest data to use to determine 5sigma value : use 5%
            dataPercent = int(fitsImage[0].shape[2] * 0.05)

            fiveSigmaData = sum(sortedData[0:dataPercent]) / dataPercent
            threshold = (abs(fiveSigmaData) / 5.0) * (2.335 / 2.0) * 15

        return pixsize, str(nbpixel), str(wmax), str(w_proj_planes), \
               str(UVmin), str(UVmax), str(robust), str(threshold)
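To see how the resolution ramps across the selfcal cycles, a quick numeric sketch of the pixsize formula above (bestPixelResol = 2.5 arcsec is an assumed value, and the nr_cycles adjustment at the top of the function is ignored):

# Illustrative walk through the pixel-size ramp over 5 cycles
badResolFactor = 9
bestPixelResol = 2.5   # arcsec, assumed
nr_cycles = 5
for major_cycle in range(nr_cycles):
    scaling_factor = float(major_cycle) / float(nr_cycles - 1)
    pixsize = round((badResolFactor * bestPixelResol) -
                    (badResolFactor * bestPixelResol - bestPixelResol) *
                    scaling_factor, 3)
    print(pixsize)   # 22.5, 17.5, 12.5, 7.5, 2.5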
Beispiel #51
0
    def _get_fov_and_station_diameter(self, measurement_set):
        """
        _field_of_view calculates the fov, which is dependent on the
        station type, location and mode.
        For details see:
        (1) http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/lofar-imaging-capabilities/lofar
        """
        # Open the ms
        table_ms = pt.table(measurement_set)

        # Get antenna name and observation mode
        antenna = pt.table(table_ms.getkeyword("ANTENNA"))
        antenna_name = antenna.getcell('NAME', 0)
        antenna.close()

        observation = pt.table(table_ms.getkeyword("OBSERVATION"))
        antenna_set = observation.getcell('LOFAR_ANTENNA_SET', 0)
        observation.close()

        # static parameters for the station diameters ref (1)
        hba_core_diameter = 30.8
        hba_remote_diameter = 41.1
        lba_inner = 32.3
        lba_outer = 81.3

        # use measurement set information to ascertain the antenna diameter
        station_diameter = None
        if antenna_name.count('HBA'):
            if antenna_name.count('CS'):
                station_diameter = hba_core_diameter
            elif antenna_name.count('RS'):
                station_diameter = hba_remote_diameter
        elif antenna_name.count('LBA'):
            if antenna_set.count('INNER'):
                station_diameter = lba_inner
            elif antenna_set.count('OUTER'):
                station_diameter = lba_outer

        # raise exception if the antenna is not of a supported type
        if station_diameter is None:
            self.logger.error(
                    'Unknown antenna type for antenna: {0} , {1}'.format(\
                              antenna_name, antenna_set))
            raise PipelineException(
                "Unknown antenna type encountered in Measurement set")

        # Get the wavelength
        spectral_window_table = pt.table(
            table_ms.getkeyword("SPECTRAL_WINDOW"))
        freq = float(spectral_window_table.getcell("REF_FREQUENCY", 0))
        wave_length = pt.taql('CALC C()') / freq
        spectral_window_table.close()

        # Now calculate the FOV see ref (1)
        # alpha_one is a magic parameter: the value 1.3 is representative of a
        # WSRT dish, where it depends on the dish illumination
        alpha_one = 1.3

        # wave_length / station_diameter is in radians, so convert the
        # FWHM to degrees for output
        fwhm = alpha_one * (wave_length / station_diameter) * (180 / math.pi)
        fov = fwhm / 2.0
        table_ms.close()

        return fov, station_diameter
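For intuition, a quick sketch of the numbers this routine produces; the frequency and station diameter below are illustrative, not taken from any particular measurement set:

import math

freq = 150e6                       # Hz, an assumed HBA observation
station_diameter = 30.8            # m, hba_core_diameter from above
wave_length = 299792458.0 / freq   # ~2.0 m

alpha_one = 1.3
fwhm = alpha_one * (wave_length / station_diameter) * (180 / math.pi)
fov = fwhm / 2.0                   # ~2.4 degrees
print(fov)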
Beispiel #52
0
import matplotlib.font_manager as font_manager

#Print closure phase vs time/elevation for three selected antennas and channel range/polarisation

pol = 0
start_chan = 0
end_chan = 14
ant1 = 0
ant2 = 1
ant3 = 2
RAdeg = 123.40025  #phase centre RA,DEC in degrees. Needed for elevation calculation
DECdeg = 48.2179139
xaxis = 'time'

#Open table
t = pt.table(sys.argv[1])

lb.figure(1)
lb.clf()


def phase(antenna1, antenna2):
    t1 = t.query('ANTENNA1= ' + str(antenna1) + ' ' + 'AND ANTENNA2= ' +
                 str(antenna2))
    print '***FLAG ANALYSIS***'
    datapoint = 0
    noflags = 0
    flag = t1.getcolslice("FLAG", [start_chan, pol], [end_chan, pol])
    phase = t1.getcolslice("DATA", [start_chan, pol], [end_chan, pol])
    time = t1.getcol("TIME")
    time = time / (24 * 3600)  #Convert MJD in seconds to days
def simulate(args):
    # get full time column and compute row chunks
    ms = table(args.ms)
    time = ms.getcol('TIME')
    row_chunks, tbin_idx, tbin_counts = chunkify_rows(time,
                                                      args.utimes_per_chunk)
    # convert to dask arrays
    tbin_idx = da.from_array(tbin_idx, chunks=(args.utimes_per_chunk))
    tbin_counts = da.from_array(tbin_counts, chunks=(args.utimes_per_chunk))
    n_time = tbin_idx.size
    ant1 = ms.getcol('ANTENNA1')
    ant2 = ms.getcol('ANTENNA2')
    n_ant = np.maximum(ant1.max(), ant2.max()) + 1
    flag = ms.getcol("FLAG")
    n_row, n_freq, n_corr = flag.shape
    if n_corr == 4:
        model_corr = (2, 2)
        jones_corr = (2, )
    elif n_corr == 2:
        model_corr = (2, )
        jones_corr = (2, )
    elif n_corr == 1:
        model_corr = (1, )
        jones_corr = (1, )
    else:
        raise RuntimeError("Invalid number of correlations")
    ms.close()

    # get phase dir
    radec0 = table(args.ms + '::FIELD').getcol('PHASE_DIR').squeeze()

    # get freqs
    freq = table(args.ms + '::SPECTRAL_WINDOW').getcol('CHAN_FREQ')[0].astype(
        np.float64)
    assert freq.size == n_freq

    # get source coordinates from lsm
    lsm = Tigger.load(args.sky_model)
    radec = []
    stokes = []
    spi = []
    ref_freqs = []

    for source in lsm.sources:
        radec.append([source.pos.ra, source.pos.dec])
        stokes.append([source.flux.I])
        tmp_spec = source.spectrum
        spi.append([tmp_spec.spi if tmp_spec is not None else 0.0])
        ref_freqs.append([tmp_spec.freq0 if tmp_spec is not None else 1.0])

    n_dir = len(stokes)
    radec = np.asarray(radec)
    lm = radec_to_lm(radec, radec0)

    # load in the model file
    model = np.zeros((n_freq, n_dir) + model_corr)
    stokes = np.asarray(stokes)
    ref_freqs = np.asarray(ref_freqs)
    spi = np.asarray(spi)
    for d in range(n_dir):
        Stokes_I = stokes[d] * (freq / ref_freqs[d])**spi[d]
        if n_corr == 4:
            model[:, d, 0, 0] = Stokes_I
            model[:, d, 1, 1] = Stokes_I
        elif n_corr == 2:
            model[:, d, 0] = Stokes_I
            model[:, d, 1] = Stokes_I
        else:
            model[:, d, 0] = Stokes_I

    # append antenna columns
    cols = []
    cols.append('ANTENNA1')
    cols.append('ANTENNA2')
    cols.append('UVW')

    # load in gains
    jones, alphas = make_screen(lm, freq, n_time, n_ant, jones_corr[0])
    jones = jones.astype(np.complex128)
    jones_shape = jones.shape
    jones_da = da.from_array(jones,
                             chunks=(args.utimes_per_chunk, ) +
                             jones_shape[1::])

    freqs = da.from_array(freq, chunks=(n_freq))
    lm = da.from_array(np.tile(lm[None], (n_time, 1, 1)),
                       chunks=(args.utimes_per_chunk, n_dir, 2))
    # change model to dask array
    tmp_shape = (n_time, )
    for i in range(len(model.shape)):
        tmp_shape += (1, )
    model = da.from_array(np.tile(model[None], tmp_shape),
                          chunks=(args.utimes_per_chunk, ) + model.shape)

    # load data in in chunks and apply gains to each chunk
    xds = xds_from_ms(args.ms, columns=cols, chunks={"row": row_chunks})[0]
    ant1 = xds.ANTENNA1.data
    ant2 = xds.ANTENNA2.data
    uvw = xds.UVW.data

    # apply gains
    data = compute_and_corrupt_vis(tbin_idx, tbin_counts, ant1, ant2, jones_da,
                                   model, uvw, freqs, lm)

    # Assign visibilities to args.out_col and write to ms
    xds = xds.assign(
        **{
            args.out_col: (("row", "chan", "corr"),
                           data.reshape(n_row, n_freq, n_corr))
        })
    # Create a write to the table
    write = xds_to_table(xds, args.ms, [args.out_col])

    # Submit all graph computations in parallel
    with ProgressBar():
        write.compute()

    return jones, alphas
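simulate() only touches a handful of attributes on args; a minimal sketch of driving it, with every attribute name inferred from the function body (the real script presumably builds this object with argparse):

from types import SimpleNamespace

# Values are illustrative; attribute names follow simulate()'s usage of args
args = SimpleNamespace(
    ms='my_data.ms',                 # hypothetical measurement set path
    sky_model='my_model.lsm.html',   # hypothetical Tigger sky model
    utimes_per_chunk=32,             # unique times per dask chunk
    out_col='DATA',                  # column the corrupted visibilities go to
)
jones, alphas = simulate(args)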
def AW_Steps(g, usemask, aw_env, nit, maxb, initialiters, mosaic, automaticthresh, bandsthreshs_dict, uvORm, userthresh, padding, out, env):
	"""
	Performs imaging with AWimager using user supplied settings.
	"""
	c=299792458.
	if g.find("/"):
		logname=g.split("/")[-1]
	else:
		logname=g
	obsid=logname.split("_")[0]
	imagename=os.path.join(out, obsid, logname+".img")
	ft = pt.table(g+'/SPECTRAL_WINDOW', ack=False)
	freq = ft.getcell('REF_FREQUENCY',0)
	wave_len=c/freq
	if uvORm == "M":
		UVmax=maxb/(wave_len*1000.)
		localmaxb=maxb
	else:
		UVmax=maxb
		localmaxb=UVmax*(wave_len*1000.)
	ft.close()
	print "Wavelength = {0:00.02f} m / UVmax = {1}".format(wave_len, UVmax)
	beam=int(g.split("SAP")[1][:3])
	beamc="SAP00{0}".format(beam)
	finish_iters=nit
	if usemask:
		mask=os.path.join(out,"masks","{0}_{1}.mask".format(obsid, beamc))
	aw_parset_name="aw_{0}.parset".format(logname)
	if automaticthresh:
		curr_band=g.split("BAND")[1][:2]
		local_parset=open(aw_parset_name, 'w')
		local_parset.write("\nms={0}\n\
image={1}\n\
niter={2}\n\
threshold={3}Jy\n\
UVmax={4}\n".format(g, imagename, initialiters, 6.*bandsthreshs_dict[curr_band], UVmax))
		# if not nomask:
			# local_parset.write("mask={0}\n".format(mask))
		for i in aw_sets:
			local_parset.write(i)
		local_parset.close()
		print "Imaging {0} with AWimager...".format(g)
		subprocess.call("awimager {0} > {1}/{2}/logs/awimager_{3}_standalone_initial_log.txt 2>&1".format(aw_parset_name, out, obsid, logname), env=aw_env, shell=True)
		subprocess.call("image2fits in={0}.residual out={0}.fits > {1}/{2}/logs/image2fits.log 2>&1".format(imagename, out, obsid), shell=True)
		try:
			thresh=2.5*(getimgstd("{0}.fits".format(imagename)))
		except:
			# noise estimation on the residual image failed; skip this band
			return
		print "Cleaning {0} to threshold of {1}...".format(g, thresh)
		os.remove("{0}.fits".format(imagename))
	else:
		thresh=userthresh
	local_parset=open(aw_parset_name, 'w')
	local_parset.write("\nms={0}\n\
image={1}\n\
niter={2}\n\
threshold={3}Jy\n\
UVmax={4}\n".format(g, imagename, finish_iters, thresh, UVmax))
	if usemask:
		local_parset.write("mask={0}\n".format(mask))
	for i in aw_sets:
		local_parset.write(i)
	local_parset.close()
	print "Cleaning {0} to threshold of {1}...".format(g, thresh)
	subprocess.call("awimager {0} > {1}/{2}/logs/awimager_{3}_standalone_final_log.txt 2>&1".format(aw_parset_name, out, obsid, logname), env=aw_env, shell=True)
	if mosaic:
		subprocess.call("cp -r {0}.restored.corr {0}_mosaic.restored.corr".format(imagename), shell=True)
		subprocess.call("cp -r {0}0.avgpb {0}_mosaic0.avgpb".format(imagename), shell=True)
		if env=="rsm-mainline":
			if padding > 1.0:
				#we need to correct the avgpb for mosaicing
				print "Correcting {0} mosaic padding...".format(imagename)
				avgpb=pt.table("{0}_mosaic0.avgpb".format(imagename), ack=False, readonly=False)
				coordstable=avgpb.getkeyword('coords')
				coordstablecopy=coordstable.copy()
				value1=coordstablecopy['direction0']['crpix'][0]
				value2=coordstablecopy['direction0']['crpix'][1]
				value1*=padding
				value2*=padding
				newcrpix=np.array([value1, value2])
				coordstablecopy['direction0']['crpix']=newcrpix
				avgpb.putkeyword('coords', coordstablecopy)
				avgpb.close()
		subprocess.call("mv {0}*mosaic* {1}".format(imagename, os.path.join(out, obsid, "mosaics")), shell=True)
	subprocess.call("addImagingInfo {0}.restored.corr '' 0 {1} {2} > {3}/{4}/logs/addImagingInfo_standalone_{4}_log.txt 2>&1".format(imagename, localmaxb, g, out, obsid, logname), shell=True)
	subprocess.call("image2fits in={0}.restored.corr out={0}.fits > {1}/{2}/logs/image2fits.log 2>&1".format(imagename, out, obsid), shell=True)
	os.remove(aw_parset_name)
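For reference, with illustrative values the parset written above would look something like this (the extra lines appended from aw_sets are omitted):

ms=L123456_SAP000_BAND00.MS
image=/out/L123456/L123456_SAP000_BAND00.MS.img
niter=2500
threshold=0.05Jy
UVmax=6.0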
Beispiel #55
0
    def _get_imaging_parameters(self, measurement_set, parset,
                                autogenerate_parameters, specify_fov, fov):
        """
        (1) calculate and format some parameters that are determined runtime.
        Based  on values in the measurementset and input parameter (set):
        
        a. <string> The cellsize
        b. <int> The npixels in a each of the two dimension of the image
        c. <string> The largest baseline in the ms smaller then the maxbaseline
        d. <string> The number of projection planes
        
        The calculation of these parameters is done in three steps:
        
        1. Calculate intermediate results based on the ms. 
        2. The calculation of the actual target values using intermediate
           result       
        """
        # *********************************************************************
        # 1. Get partial solutions from the parameter set
        # Get the parset and a number of raw parameters from this parset
        parset_object = get_parset(parset)
        baseline_limit = parset_object.getInt('maxbaseline')

        # Get the longest baseline
        max_baseline = pt.taql(
                        'CALC sqrt(max([select sumsqr(UVW[:2]) from ' + \
            '{0} where sumsqr(UVW[:2]) <{1} giving as memory]))'.format(\
            measurement_set, baseline_limit *
            baseline_limit))[0]  # ask Ger van Diepen for details if necessary
        # Calculate the wave_length
        table_ms = pt.table(measurement_set)
        table_spectral_window = pt.table(
            table_ms.getkeyword("SPECTRAL_WINDOW"))
        freq = table_spectral_window.getcell("REF_FREQUENCY", 0)

        table_spectral_window.close()
        wave_length = pt.taql('CALC C()') / freq
        wave_length = wave_length[0]

        # Calculate the cell_size from the ms
        arc_sec_in_degree = 3600
        arc_sec_in_rad = (180.0 / math.pi) * arc_sec_in_degree
        cell_size = (1.0 / 3) * (wave_length / float(max_baseline))\
             * arc_sec_in_rad

        # Calculate the number of pixels in x and y dim
        #    fov and diameter depending on the antenna name
        fov_from_ms, station_diameter = self._get_fov_and_station_diameter(
            measurement_set)

        # use fov for to calculate a semi 'user' specified npix and cellsize
        # The npix thus depends on the ms cellsize and fov
        # Do not use use supplied Fov if autogenerating
        if not autogenerate_parameters and specify_fov:
            if fov == 0.0:
                raise PipelineException("fov set to 0.0: invalid value.")

        # else use full resolution (calculate the fov)
        else:
            self.logger.info("Using fov calculated on measurement data: " +
                             str(fov_from_ms))
            fov = fov_from_ms

        # ********************************************************************
        # 2. Calculate the ms based output variables
        # 'optimal' npix based on measurement set calculations or user specified
        npix = (arc_sec_in_degree * fov) / cell_size

        # Get the closest power of two larger than the calculated npix
        npix = self._nearest_ceiled_power2(npix)

        # Get the max w for baselines shorter than the baseline limit
        w_max = pt.taql('CALC max([select UVW[2] from ' + \
            '{0} where sumsqr(UVW[:2]) <{1} giving as memory])'.format(
            measurement_set, baseline_limit * baseline_limit))[0]

        # Calculate number of projection planes
        w_proj_planes = min(
            257,
            math.floor((max_baseline * wave_length) / (station_diameter**2)))
        w_proj_planes = int(round(w_proj_planes))

        # Maximum number of proj planes set to 1024: contact George Heald or
        # Ger van Diepen if this exception occurs
        maxsupport = max(1024, npix)
        if w_proj_planes > maxsupport:
            raise Exception(
                "The number of projection planes for the current "
                "measurement set is too large.")

        # *********************************************************************
        # 3. if the npix from the parset differs from the ms calculation,
        # calculate a sizeconverter value (to be applied to the cellsize)
        if npix < 256:
            self.logger.warn(
                "Using an image size smaller than 256x256:"
                " this leads to problematic imaging in some instances!")

        # If we are not autocalculating based on ms or fov, use the npix
        # and cell_size specified in the parset
        # keep the wmax and w_proj_planes
        if (not autogenerate_parameters and not specify_fov):
            npix = parset_object.getString('npix')
            cell_size_formatted = parset_object.getString('cellsize')
        else:
            cell_size_formatted = str(int(round(cell_size))) + 'arcsec'

        self.logger.info(
            "Using the following awimager parameters:"
            " cell_size: {0}, npix: {1},".format(cell_size_formatted, npix) +
            " w_max: {0}, w_proj_planes: {1}".format(w_max, w_proj_planes))

        return cell_size_formatted, str(npix), str(w_max), str(w_proj_planes)
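As a worked example of the arithmetic above (all input numbers are illustrative):

import math

max_baseline = 10000.0             # m, assumed baseline limit
freq = 150e6                       # Hz, assumed
wave_length = 299792458.0 / freq   # ~2.0 m

arc_sec_in_degree = 3600
arc_sec_in_rad = (180.0 / math.pi) * arc_sec_in_degree
cell_size = (1.0 / 3) * (wave_length / max_baseline) * arc_sec_in_rad
print(cell_size)                   # ~13.7 arcsec

fov = 2.4                          # degrees, e.g. from the FOV routine above
npix = (arc_sec_in_degree * fov) / cell_size
print(npix)                        # ~629 -> next power of two: 1024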
def calibrate(args, jones, alphas):
    # simple calibration to test if simulation went as expected.
    # Note do not run on large data set

    # load data
    ms = table(args.ms)
    time = ms.getcol('TIME')
    _, tbin_idx, tbin_counts = chunkify_rows(time, args.utimes_per_chunk)
    n_time = tbin_idx.size
    ant1 = ms.getcol('ANTENNA1')
    ant2 = ms.getcol('ANTENNA2')
    n_ant = np.maximum(ant1.max(), ant2.max()) + 1
    uvw = ms.getcol('UVW').astype(np.float64)
    data = ms.getcol(args.out_col)  # this is where we put the data
    # we know it is pure Stokes I so we can solve using diagonals only
    data = data[:, :, (0, 3)].astype(np.complex128)
    n_row, n_freq, n_corr = data.shape
    flag = ms.getcol('FLAG')
    flag = flag[:, :, (0, 3)]

    # get phase dir
    radec0 = table(args.ms + '::FIELD').getcol('PHASE_DIR').squeeze().astype(
        np.float64)

    # get freqs
    freq = table(args.ms + '::SPECTRAL_WINDOW').getcol('CHAN_FREQ')[0].astype(
        np.float64)
    assert freq.size == n_freq

    # now get the model
    # get source coordinates from lsm
    lsm = Tigger.load(args.sky_model)
    radec = []
    stokes = []
    spi = []
    ref_freqs = []

    for source in lsm.sources:
        radec.append([source.pos.ra, source.pos.dec])
        stokes.append([source.flux.I])
        tmp_spec = source.spectrum
        spi.append([tmp_spec.spi if tmp_spec is not None else 0.0])
        ref_freqs.append([tmp_spec.freq0 if tmp_spec is not None else 1.0])

    n_dir = len(stokes)
    radec = np.asarray(radec)
    lm = radec_to_lm(radec, radec0)

    # get model visibilities
    model = np.zeros((n_row, n_freq, n_dir, 2), dtype=np.complex128)
    stokes = np.asarray(stokes)
    ref_freqs = np.asarray(ref_freqs)
    spi = np.asarray(spi)
    for d in range(n_dir):
        Stokes_I = stokes[d] * (freq / ref_freqs[d])**spi[d]
        model[:, :, d, 0:1] = im_to_vis(Stokes_I[None, :, None], uvw,
                                        lm[d:d + 1], freq)
        model[:, :, d, 1] = model[:, :, d, 0]

    # set weights to unity
    weight = np.ones_like(data, dtype=np.float64)

    # initialise gains
    jones0 = np.ones((n_time, n_ant, n_freq, n_dir, n_corr),
                     dtype=np.complex128)

    # calibrate
    ti = timeit()
    jones_hat, jhj, jhr, k = gauss_newton(tbin_idx,
                                          tbin_counts,
                                          ant1,
                                          ant2,
                                          jones0,
                                          data,
                                          flag,
                                          model,
                                          weight,
                                          tol=1e-5,
                                          maxiter=100)
    print("%i iterations took %fs" % (k, timeit() - ti))

    # verify result: compare gain phase differences, which are insensitive
    # to the overall phase ambiguity of the solutions
    for p in range(2):
        for q in range(p):
            diff_true = np.angle(jones[:, p] * jones[:, q].conj())
            diff_hat = np.angle(jones_hat[:, p] * jones_hat[:, q].conj())
            try:
                assert_array_almost_equal(diff_true, diff_hat, decimal=2)
            except Exception as e:
                print(e)
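calibrate() is meant to be run on the output of simulate(); a minimal sketch of the round trip, reusing the hypothetical args object from above:

# Simulate gain-corrupted visibilities, then try to recover the gains
jones, alphas = simulate(args)
calibrate(args, jones, alphas)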
Beispiel #57
0
def closure(vis,tel,lastv=-1,pol=0,use_spw=0,bchan=0,echan=-1,\
            doplot=False,doret=False):

    # Find target source id

    target_id = vis.split('/')[-1].split('_')[0]
    print "target_id", target_id
    print "Antennas for closure phase", tel

    # Find array of requested telescopes and list of telescopes in data

    command = 'taql \'select NAME from %s/ANTENNA\' >closure_txt'%vis
    os.system(command)
    os.system('grep -v select closure_txt >closure_which')
    idxtel = np.loadtxt('closure_which',dtype='S')
    atel = np.unique(np.ravel(tel))

    # For each requested telescope, determine its position in the list
    # If more than one telescope in the list match to within the number
    #   of letters in the requested telescope, keep the first (so CS002
    #   will match to CS002HBA0 and CS002HBA1 will be ignored)
    # Keep a list of telescopes not found, to print if we need to crash
    
    notfound = []
    aidx = np.array([],dtype='int')
    for a in atel:
        found_this = False
        for i in range(len(idxtel)):
            if a==idxtel[i][:len(a)]:
                aidx = np.append(aidx,i)
                found_this = True
        if not found_this:
            notfound.append (a)

    if len(notfound):
        print 'The following telescopes were not found:',notfound


    aidx_s = np.sort(aidx)

    # Make a smaller MS 'as plain' with the required baseline. This is slow
    # but only needs doing once for an arbitrary number of baselines.

    if os.path.exists ('cl_temp.ms'):
        os.system('rm -fr cl_temp.ms')
    command = 'taql \'select from %s where ' % vis
    for i in range (len(aidx_s)):
        for j in range (i+1, len(aidx_s)):
            command += ('ANTENNA1==%d and ANTENNA2==%d' % \
                            (aidx_s[i],aidx_s[j]))
            if i==len(aidx_s)-2 and j==len(aidx_s)-1:
                command += (' giving cl_temp.ms as plain\'')
            else:
                command += (' or ')

    print 'Selecting smaller MS cl_temp.ms, this will take about 4s/Gb:'
    os.system (command)

    # Loop around the requested closure triangles

    clstats = np.array([])
    allcp = []
    for tr in tel:
        tri = np.array([],dtype='int')
        for i in range(3):
            tri = np.append (tri, aidx[np.argwhere(atel==tr[i])[0][0]])
        tri = np.sort(tri)
        # Make three reference MSs with pointers into the small MS
        command = 'taql \'select from cl_temp.ms where ANTENNA1==%d and ANTENNA2==%d giving closure_temp1.ms\'' %(tri[0],tri[1])
        os.system(command)
        command = 'taql \'select from cl_temp.ms where ANTENNA1==%d and ANTENNA2==%d giving closure_temp2.ms\'' %(tri[1],tri[2])
        os.system(command)
        command = 'taql \'select from cl_temp.ms where ANTENNA1==%d and ANTENNA2==%d giving closure_temp3.ms\'' %(tri[0],tri[2])
        os.system(command)

        # Load data arrays and get amp, closure phase

        t1 = pt.table('closure_temp1.ms')
        t2 = pt.table('closure_temp2.ms')
        t3 = pt.table('closure_temp3.ms')
        ut = t1.select('TIME')
        spw = t1.select('DATA_DESC_ID')
        d1,d2,d3 = t1.select('DATA'), t2.select('DATA'), t3.select('DATA')
        clthis, cp = get_amp_clph(d1[:lastv],d2[:lastv],d3[:lastv],spw[:lastv],
             pol=0, use_spw=use_spw, bchan=bchan, echan=echan)
        allcp.append(cp)

        os.system('rm -fr closure_temp*ms')
        if os.path.exists ('closure_which'):
            os.system('rm closure_which')
        clstats = np.append (clstats, clthis)
    if doplot:
        cl_mkplot (allcp,tel,target_id)
    clstats = clstats[0] if len(clstats)==1 else clstats
    if doret:
        return clstats,allcp
    else:
        return clstats
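A hypothetical invocation, with station triangles given as name triples (all values illustrative):

# Closure-phase statistics for two triangles; partial names like 'CS002'
# match the first fitting entry in the ANTENNA table, as described above
tel = [['CS002', 'CS004', 'RS208'],
       ['CS002', 'CS004', 'RS307']]
stats, cps = closure('L123456_SAP000_BAND00.MS', tel, doret=True)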
Beispiel #58
0
def process_chunk(ms_file,
                  ms_parmdb,
                  chunkid,
                  nchunks,
                  mystarttime,
                  myendtime,
                  chunksize,
                  dirindparmdb,
                  colnames_to_keep,
                  newdirname,
                  local_dir=None,
                  min_fraction=0.1):
    """
    Processes one time chunk of input ms_file and returns new file names

    Parameters
    ----------
    ms_file : str
        Input MS file to chunk
    ms_parmdb : str
        Input dir-independent parmdb for input MS file
    chunkid : int
        ID of chunk
    nchunks : int
        Total number of chunks
    mystarttime : float
        Start time of MS file
    myendtime : float
        End time of MS file
    chunksize : float
        length of a chunk in seconds
    dirindparmdb : str
        Name of direction-independent instrument parmdb inside the new chunk files
    colnames_to_keep : list
        List of column names to keep in output chunk
    newdirname : str
        Name of output directory
    local_dir : str
        Path to local scratch directory for temp output. The file is then
        copied to the original output directory
    min_fraction : float, optional
        Minimum fraction of unflagged data in a time-chunk needed for the chunk
        to be kept.

    Returns
    -------
    chunk_file : str
        Filename of chunk MS or None
    newdirindparmdb : str
        Filename of direction-independent instrument parmdb for chunk_file or None

    """
    log = logging.getLogger('factor:MS-chunker')
    chunk_name = '{0}_chunk{1}.ms'.format(
        os.path.splitext(os.path.basename(ms_file))[0], chunkid)
    chunk_file = os.path.join(newdirname, chunk_name)
    old_chunk_file = os.path.join(os.path.dirname(ms_file), 'chunks',
                                  chunk_name)

    starttime = mystarttime + chunkid * chunksize
    endtime = mystarttime + (chunkid + 1) * chunksize
    if chunkid == 0:
        starttime -= chunksize
    if chunkid == (nchunks - 1):
        endtime += 2. * chunksize
    tab = pt.table(ms_file, lockoptions='autonoread', ack=False)
    seltab = tab.query('TIME >= ' + str(starttime) + ' && TIME < ' +
                       str(endtime),
                       sortlist='TIME,ANTENNA1,ANTENNA2',
                       columns=','.join(colnames_to_keep))

    copy = True
    if os.path.exists(chunk_file):
        try:
            newtab = pt.table(chunk_file, ack=False)
            if len(newtab) == len(seltab):
                copy = False
                log.debug(
                    'Chunk {} exists with correct length, not copying!'.format(
                        chunk_name))
            newtab.close()
        except:
            copy = True
        if copy:
            shutil.rmtree(chunk_file)
    elif os.path.exists(old_chunk_file):
        # For compatibility, also search in old location
        try:
            newtab = pt.table(old_chunk_file, ack=False)
            if len(newtab) == len(seltab):
                copy = False
                chunk_file = old_chunk_file
                log.debug(
                    'Chunk {} exists with correct length in old directory, '
                    'not copying!'.format(chunk_name))
            newtab.close()
        except:
            copy = True

    newdirindparmdb = os.path.join(chunk_file, dirindparmdb)

    if copy:
        if local_dir is not None:
            # Set output to temp directory
            chunk_file_original = chunk_file
            chunk_file = os.path.join(local_dir,
                                      os.path.basename(chunk_file_original))
            if os.path.exists(chunk_file):
                shutil.rmtree(chunk_file)

        log.debug('Going to copy {0} samples to file {1}'.format(
            str(len(seltab)), chunk_file))
        seltab.copy(chunk_file, True)

        if local_dir is not None:
            # Copy temp file to original output location and clean up
            chunk_file_destination_dir = os.path.dirname(chunk_file_original)
            os.system('/usr/bin/rsync -a {0} {1}'.format(
                chunk_file, chunk_file_destination_dir))
            if not os.path.samefile(chunk_file, chunk_file_original):
                shutil.rmtree(chunk_file)
            chunk_file = chunk_file_original

        shutil.copytree(ms_parmdb, newdirindparmdb)
    seltab.close()
    tab.close()

    # Check that the chunk has at least min_fraction unflagged data.
    # If not, then return (None, None). seltab and tab are already closed.
    if find_unflagged_fraction(chunk_file) < min_fraction:
        log.debug(
            'Chunk {} not used because it contains too little unflagged data'.
            format(chunk_name))
        return (None, None)

    return (chunk_file, newdirindparmdb)
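A sketch of chunking one MS into three ten-minute pieces; the paths, times, and column list are illustrative (in the real Factor pipeline the surrounding code supplies these):

chunk_files = []
for cid in range(3):
    cf, pdb = process_chunk('L123456_SAP000_BAND00.MS',    # hypothetical MS
                            'L123456_instrument',          # hypothetical parmdb
                            chunkid=cid, nchunks=3,
                            mystarttime=4891234000.0,      # MS start (MJD seconds)
                            myendtime=4891235800.0,        # MS end
                            chunksize=600.0,               # seconds per chunk
                            dirindparmdb='instrument',
                            colnames_to_keep=['DATA', 'FLAG'],
                            newdirname='chunks_out')
    if cf is not None:                 # chunk kept only if enough unflagged data
        chunk_files.append(cf)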
def AW_Steps_split(g, interval, niter, aw_env, maxb, userthresh, uvORm, usemask, mosaic, padding, out, env):
	"""
	Performs imaging with AWimager using user supplied settings.
	"""
	c=299792458.
	tempgettime=pt.table(g, ack=False)
	ms_starttime=tempgettime.col("TIME")[0]#datetime.utcfromtimestamp(quantity(str(tempgettime.col("TIME")[0])+'s').to_unix_time())
	tempgettime.close()
	name=g.split("/")[-1]
	obsid=name.split("_")[0]
	home=os.path.join(out, obsid)
	temp=pt.table("{0}/OBSERVATION".format(g), ack=False)
	timerange=float(temp.col("TIME_RANGE")[0][1])-float(temp.col("TIME_RANGE")[0][0])
	temp.close()
	ft = pt.table(g+'/SPECTRAL_WINDOW', ack=False)
	freq = ft.getcell('REF_FREQUENCY',0)
	wave_len=c/freq
	ft.close()
	if uvORm == "M":
		UVmax=maxb/(wave_len*1000.)
		localmaxb=maxb
	else:
		UVmax=maxb
		localmaxb=UVmax*(wave_len*1000.)
	print "Wavelength = {0:00.02f} m / UVmax = {1}".format(wave_len, UVmax)
	beam=int(g.split("SAP")[1][:3])
	beamc="SAP00{0}".format(beam)
	if usemask:
		mask=os.path.join(out,"masks","{0}_{1}.mask".format(obsid, beamc))
	num_images=int(timerange/interval)
	interval_min=interval/60.
	interval_min_round=round(interval_min, 2)
	start_time=0.0
	end_time=interval_min_round
	for i in range(num_images):
		try:
			aw_parset_name="aw_{0}.parset".format(name)
			time_tag=".timesplit.{0:05.2f}min.window{1:03d}.img".format(interval_min, i+1)
			imagename=os.path.join(home, name+time_tag)
			imagename_short=name+time_tag
			logname=os.path.join(home,"logs",name+time_tag+".txt")
			thresh=userthresh
			print "Cleaning {0} Window {1:03d} to threshold of {2}...".format(name, i+1, thresh)
			local_parset=open(aw_parset_name, 'w')
			local_parset.write("\nms={0}\n\
image={1}\n\
niter={4}\n\
threshold={5}Jy\n\
t0={2}\n\
t1={3}\n\
UVmax={6}\n".format(g, imagename, start_time, end_time, niter, userthresh, UVmax))
			if usemask:
				local_parset.write("mask={0}\n".format(mask))
			for s in aw_sets:
				local_parset.write(s)
			local_parset.close()
			subprocess.call("awimager {0} > {1} 2>&1".format(aw_parset_name, logname), env=aw_env,  shell=True)
			if mosaic:
				subprocess.call("cp -r {0}.restored.corr {0}_mosaic.restored.corr".format(imagename), shell=True)
				subprocess.call("cp -r {0}0.avgpb {0}_mosaic0.avgpb".format(imagename), shell=True)
				if env=="rsm-mainline":
					if padding > 1.0:
						#we need to correct the avgpb for mosaicing
						print "Correcting {0} mosaic padding...".format(imagename)
						avgpb=pt.table("{0}_mosaic0.avgpb".format(imagename), ack=False, readonly=False)
						coordstable=avgpb.getkeyword('coords')
						coordstablecopy=coordstable.copy()
						value1=coordstablecopy['direction0']['crpix'][0]
						value2=coordstablecopy['direction0']['crpix'][1]
						value1*=padding
						value2*=padding
						newcrpix=np.array([value1, value2])
						coordstablecopy['direction0']['crpix']=newcrpix
						avgpb.putkeyword('coords', coordstablecopy)
						avgpb.close()
				subprocess.call("mv {0}*mosaic* {1}".format(imagename, os.path.join(out, obsid, "mosaics")), shell=True)
			subprocess.call("addImagingInfo {0}.restored.corr '' 0 {1} {2} > {3}/{4}/logs/addImagingInfo_standalone_{5}_log.txt 2>&1".format(imagename, localmaxb, g, out, obsid, imagename_short), shell=True)
			subprocess.call("image2fits in={0}.restored.corr out={0}.fits > {1}/{2}/logs/image2fits.log 2>&1".format(imagename,out,obsid), shell=True)
			if start_time!=0.0:
				temp_change_start=pt.table("{0}.restored.corr/LOFAR_ORIGIN/".format(imagename), ack=False, readonly=False)
				temp_change_start2=pt.table("{0}.restored.corr/LOFAR_OBSERVATION/".format(imagename), ack=False, readonly=False)
				new_ms_startime=ms_starttime+start_time*60.
				temp_change_start.putcell('START',0,new_ms_startime)
				temp_change_start2.putcell('OBSERVATION_START',0,new_ms_startime)
				temp_change_start.close()
				temp_change_start2.close()
				if mosaic:
					mosaic_time=os.path.join(obsid, "mosaics", imagename_short+"_mosaic.restored.corr")
					tempimg=pt.table(mosaic_time, ack=False, readonly=False)
					restored_time=tempimg.getkeyword('coords')
					oldtime=restored_time['obsdate']['m0']['value']
					newtime=oldtime+((i*interval_min_round/(60.*24.)))
					restored_time['obsdate']['m0']['value']=newtime
					tempimg.putkeyword('coords', restored_time)
					tempimg.close()
			injparset="tkp_inject_{0}.parset".format(imagename_short)
			temp_inject=open(injparset, "w")
	 		temp_inject.write("tau_time={0}\n".format(interval))
			temp_inject.close()
			subprocess.call("trap-inject.py {0} {1}.restored.corr > {2}/{3}/logs/{4}_trapinject_log.txt 2>&1".format(injparset, imagename,out,obsid, imagename_short), shell=True)
			os.remove(injparset)
			os.remove(aw_parset_name)
			start_time+=interval_min_round
			end_time+=interval_min_round
		except:
			# imaging of this window failed; advance to the next window
			start_time+=interval_min_round
			end_time+=interval_min_round
			continue
Beispiel #60
0
def test_table_executor_with_proxies(tmpdir, ms):
    from xarrayms.table_executor import _thread_local

    # Clear any state in the TableExecutor
    TableExecutor.close(wait=True)

    ms2 = os.path.join(str(tmpdir), os.path.split(ms)[1])
    shutil.copytree(ms, ms2)

    with pt.table(ms, ack=False) as T:
        time = T.getcol("TIME")

    proxy_one = TableProxy(ms)
    proxy_two = TableProxy(ms2)
    proxy_three = TableProxy(ms)

    # Extract executors from the cache
    cache = TableExecutor._TableExecutor__cache
    refcounts = TableExecutor._TableExecutor__refcounts
    ex1 = cache[ms]
    ex2 = cache[ms2]

    # table name should be in thread local, but not the wrapper
    assert ex1.submit(getattr, _thread_local, "table_name").result() == ms
    assert ex1.submit(getattr, _thread_local, "wrapper", None).result() is None
    # 2 references to ms
    assert refcounts[ms] == 2

    assert ex2.submit(getattr, _thread_local, "table_name").result() == ms2
    assert ex2.submit(getattr, _thread_local, "wrapper", None).result() is None
    # 1 reference to ms
    assert refcounts[ms2] == 1
    assert sorted(cache.keys()) == sorted([ms, ms2])

    # Request data, check that it's valid and
    # check that the wrapper has been created
    assert_array_equal(proxy_one.getcol("TIME").result(), time)
    assert_array_equal(proxy_two.getcol("TIME").result(), time)
    tab1 = ex1.submit(getattr, _thread_local, "wrapper", None).result()
    assert tab1 is not None
    tab2 = ex2.submit(getattr, _thread_local, "wrapper", None).result()
    assert tab2 is not None

    # Close the first proxy; there should be one reference to ms now
    proxy_one.close()
    assert refcounts[ms] == 1
    assert sorted(cache.keys()) == sorted([ms, ms2])

    # Wrapper still exists on the first executor
    assert ex1.submit(getattr, _thread_local, "table_name").result() == ms
    res = ex1.submit(getattr, _thread_local, "wrapper", None).result()
    assert res is not None

    # Close the third proxy, there should be no references to ms now
    proxy_three.close()

    # Wrapper still exists on the second executor
    assert ex2.submit(getattr, _thread_local, "table_name").result() == ms2
    res = ex2.submit(getattr, _thread_local, "wrapper", None).result()
    assert res is not None

    assert ms not in refcounts
    assert sorted(cache.keys()) == [ms2]

    # Executor has been shutdown
    match_str = "cannot schedule new futures after shutdown"
    with pytest.raises(RuntimeError, match=match_str):
        ex1.submit(lambda: True).result()

    # Close the last proxy, there should be nothing left
    proxy_two.close()
    assert ms2 not in refcounts
    assert len(cache) == 0

    with pytest.raises(RuntimeError, match=match_str):
        ex2.submit(lambda: True).result()

    # Re-create
    proxy_one = TableProxy(ms)
    proxy_two = TableProxy(ms2)
    proxy_three = TableProxy(ms)

    assert sorted(cache.keys()) == sorted([ms, ms2])

    TableExecutor.close(wait=True)
    assert len(cache) == 0

    # Tables should be closed already, but force a close before
    # the temporary directory disappears
    tab1.table.close()
    tab2.table.close()