Example #1
def run(step, parset, H):
    """
    Copy values from one table to another (of the same kind).
    If the tables have different sampling, resample the values.
    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    inTable = parset.getString('.'.join(["LoSoTo.Steps", step, "InTable"]),
                               '')  # complete solset/soltab
    outTable = parset.getString('.'.join(["LoSoTo.Steps", step, "OutTable"]),
                                '')  # complete solset/soltab or ''

    if inTable == '':
        logging.error('InTable is undefined.')
        return 1

    if outTable == '':
        outSolsetName = inTable.split('/')[0]
        outTableName = None
    else:
        outSolsetName = outTable.split('/')[0]
        outTableName = outTable.split('/')[1]

    ss, st = inTable.split('/')
    sf = solFetcher(H.getSoltab(ss, st))

    t = H.makeSoltab(solset = outSolsetName, soltype = sf.getType(), soltab = outTableName, axesNames=sf.getAxesNames(), \
        axesVals=[sf.getAxisValues(axisName) for axisName in sf.getAxesNames()], \
        vals=sf.getValues(retAxesVals = False), weights=sf.getValues(weight = True, retAxesVals = False), parmdbType=sf.t._v_attrs['parmdb_type'])

    sw = solWriter(t)
    sw.addHistory('DUPLICATE (from table %s)' % (inTable))
    return 0
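
A note on the parset lookups used throughout these examples: the keys are assembled with '.'.join and the table arguments are "solset/soltab" strings. A minimal, self-contained sketch (the step and table names are hypothetical):

step = "duplicate"                                   # hypothetical step name
key = '.'.join(["LoSoTo.Steps", step, "InTable"])    # -> "LoSoTo.Steps.duplicate.InTable"
inTable = "sol000/amplitude000"                      # made-up solset/soltab value
ss, st = inTable.split('/')
print(key, ss, st)
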
Example #2
File: abs.py  Project: AHorneffer/losoto
def run( step, parset, H ):

    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Taking ABSolute value of soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        vals = sf.getValues(retAxesVals = False)
        count = np.count_nonzero(vals<0)

        logging.info('Abs: %i points initially negative (%f %%)' % (count,100*float(count)/np.count_nonzero(vals)))

        # writing back the solutions
        sw.setValues(np.abs(vals))

        sw.addHistory('ABSolute value taken')
        
    return 0
Example #3
def run(step, parset, H):

    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)

    for soltab in openSoltabs(H, soltabs):

        logging.info("Taking ABSolute value of soltab: " + soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        sf.setSelection(**userSel)

        vals = sf.getValues(retAxesVals=False)
        count = np.count_nonzero(vals < 0)

        logging.info('Abs: %i points initially negative (%f %%)' %
                     (count, 100 * float(count) / np.count_nonzero(vals)))

        # writing back the solutions
        sw.setValues(np.abs(vals))

        sw.addHistory('ABSolute value taken')

    return 0
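
The operation itself is plain NumPy: count the negative points, then write back the absolute values. A standalone sketch on made-up data:

import numpy as np

vals = np.array([[-1.5, 2.0, 0.0], [3.0, -0.5, -2.0]])   # synthetic solutions
count = np.count_nonzero(vals < 0)                        # points initially negative
frac = 100 * float(count) / np.count_nonzero(vals)        # percentage of non-zero points
print('Abs: %i points initially negative (%f %%)' % (count, frac))
print(np.abs(vals))                                       # values that would be written back
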
Example #4
def run(step, parset, H):

    from losoto.h5parm import solWriter

    soltabs = getParSoltabs(step, parset, H)

    for soltab in openSoltabs(H, soltabs):

        logging.info("Resetting soltab: " + soltab._v_name)

        t = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in t.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        t.setSelection(**userSel)

        solType = t.getType()

        if solType == 'amplitude':
            t.setValues(1.)
        else:
            t.setValues(0.)

        t.addHistory('RESET')
    return 0
Example #5
File: reset.py  Project: AHorneffer/losoto
def run( step, parset, H ):

    from losoto.h5parm import solWriter

    soltabs = getParSoltabs( step, parset, H )

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Resetting soltab: "+soltab._v_name)

        t = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in t.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        t.setSelection(**userSel)

        solType = t.getType()

        if solType == 'amplitude':
            t.setValues(1.)
        else:
            t.setValues(0.)

        t.addHistory('RESET')
    return 0
Example #6
def run( step, parset, H ):
    """
    Copy values from one table to another (of the same kind).
    If the tables have different sampling, resample the values.
    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter
    
    inTable = parset.getString('.'.join(["LoSoTo.Steps", step, "InTable"]), '' ) # complete solset/soltab
    outTable = parset.getString('.'.join(["LoSoTo.Steps", step, "OutTable"]), '' ) # complete solset/soltab or ''

    if inTable == '':
        logging.error('InTable is undefined.')
        return 1

    if outTable == '':
        outSolsetName = inTable.split('/')[0]
        outTableName = None
    else:
        outSolsetName = outTable.split('/')[0]
        outTableName = outTable.split('/')[1]

    ss, st = inTable.split('/')
    sf = solFetcher(H.getSoltab(ss, st))

    t = H.makeSoltab(solset = outSolsetName, soltype = sf.getType(), soltab = outTableName, axesNames=sf.getAxesNames(), \
        axesVals=[sf.getAxisValues(axisName) for axisName in sf.getAxesNames()], \
        vals=sf.getValues(retAxesVals = False), weights=sf.getValues(weight = True, retAxesVals = False), parmdbType=sf.t._v_attrs['parmdb_type'])

    sw = solWriter(t)
    sw.addHistory('DUPLICATE (from table %s)' % (inTable))
    return 0
Example #7
def plugin_main(args, **kwargs):
    fileid = kwargs['mapfile_in']
    datamap = DataMap.load(fileid)
    hdf5File = os.path.join(kwargs['hdf5_dir'],kwargs['hdf5file'])
    if kwargs.has_key('instrument'):
        instrument = kwargs['instrument']
    else:
        instrument = '/instrument'
    if kwargs.has_key('compression'):
        compression = int(kwargs['compression'])
    else:
        compression = 5
    if kwargs.has_key('solset'):
        solsetName = kwargs['solset']
    else:
        solsetName = None


    # Check if all the necessary files are available
    antennaFile = os.path.join(datamap[0].file,'ANTENNA')
    if not os.path.isdir(antennaFile):
        logging.critical('Missing ANTENNA table.')
        sys.exit(1)
    fieldFile = os.path.join(datamap[0].file,'FIELD')
    if not os.path.isdir(fieldFile):
        logging.critical('Missing FIELD table.')
        sys.exit(1)
    skydbFile = os.path.join(datamap[0].file,'sky')
    if not os.path.isdir(skydbFile):
        logging.critical('Missing sky table.')
        sys.exit(1)
        
    #generate list of parmDB-filenames
    parmDBnames = [ MS.file+instrument for MS in datamap ]

    #create and fill the hdf5-file:
    solset = parmDBs2h5parm(hdf5File, parmDBnames, antennaFile, fieldFile, skydbFile, compression=compression, solsetName=solsetName)

    # Add CREATE entry to history 
    h5parmDB = h5parm(hdf5File, readonly = False)
    soltabs = h5parmDB.getSoltabs(solset=solset)
    for st in soltabs:
        sw = solWriter(soltabs[st])
        sw.addHistory('CREATE (by PipelineStep_losotoImporter from %s / %s - %s)' % (os.path.abspath(''), 
                                   os.path.basename(parmDBnames[0]), os.path.basename(parmDBnames[-1]) ) )
    h5parmDB.close()

    #generate mapfile and wrap up
    mapfileentry = {}
    mapfileentry['host'] = 'localhost'
    mapfileentry['file'] = hdf5File
    mapfileentry['skip'] = False            
    outfileid = os.path.join(kwargs['mapfile_dir'], kwargs['filename'])
    outmap = open(outfileid, 'w')
    outmap.write(repr([mapfileentry]))
    outmap.close()
    result = {}
    result['mapfile'] = outfileid
    return result
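
dict.has_key only exists in Python 2; if this plugin were ported, the same defaults could be read with dict.get. A small sketch (not part of the original plugin):

def read_options(kwargs):
    # Python 3 style equivalent of the has_key blocks above, same defaults
    instrument = kwargs.get('instrument', '/instrument')
    compression = int(kwargs.get('compression', 5))
    solsetName = kwargs.get('solset')   # None when the key is absent
    return instrument, compression, solsetName

print(read_options({'compression': '7'}))   # ('/instrument', 7, None)
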
Example #8
def run(step, parset, H):

    from losoto.h5parm import solFetcher, solWriter
    import numpy as np

    soltabs = getParSoltabs(step, parset, H)

    weightVal = parset.getFloat('.'.join(["LoSoTo.Steps", step, "WeightVal"]),
                                1.)
    mergeSoltab = parset.getString(
        '.'.join(["LoSoTo.Steps", step, "MergeFromSoltab"]), '')
    flagBad = parset.getBool('.'.join(["LoSoTo.Steps", step, "FlagBad"]),
                             False)

    for soltab in openSoltabs(H, soltabs):

        logging.info("Reweighting soltab: " + soltab._v_name)

        sw = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in sw.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        sw.setSelection(**userSel)

        if mergeSoltab != '':
            mss, mst = mergeSoltab.split('/')
            msf = solFetcher(H.getSoltab(mss, mst))
            msf.setSelection(**userSel)
            sf = solFetcher(soltab)
            sf.setSelection(**userSel)
            weights, axes = sf.getValues(weight=True)
            mergeWeights, mergeAxes = msf.getValues(weight=True)
            if axes.keys() != mergeAxes.keys() or weights.shape != mergeWeights.shape:
                logging.error('Cannot merge two tables with different axes values.')
                return 1
            weights[np.where(mergeWeights == 0)] = 0.
            sw.addHistory('WEIGHT merged from ' + mergeSoltab +
                          ' for selection:' + str(userSel))
        else:
            weights = weightVal
            sw.addHistory('REWEIGHTED to ' + str(weightVal) +
                          ' for selection:' + str(userSel))

        sw.setValues(weights, weight=True)

        if flagBad:
            sf = solFetcher(soltab)
            sf.setSelection(**userSel)
            weights = sf.getValues(weight=True, retAxesVals=False)
            vals = sf.getValues(retAxesVals=False)
            if sf.getType() == 'amplitude': weights[np.where(vals == 1)] = 0
            else: weights[np.where(vals == 0)] = 0
            sw.setValues(weights, weight=True)

    return 0
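
The weight handling is simple NumPy masking; a standalone sketch with made-up arrays, mirroring the merge branch and the FlagBad branch above:

import numpy as np

weights = np.ones((2, 3))
mergeWeights = np.array([[1., 0., 1.], [0., 1., 1.]])   # hypothetical weights from MergeFromSoltab
weights[np.where(mergeWeights == 0)] = 0.               # propagate flags on merge

vals = np.array([[0.9, 1.0, 1.1], [1.0, 0.8, 1.2]])     # amplitude-like solutions
weights[np.where(vals == 1)] = 0                        # FlagBad: amplitude values equal to 1 are flagged
print(weights)
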
Example #9
def run(step, parset, H):

    import scipy.ndimage.filters
    import scipy.interpolate
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)

    s = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "smooth"]), 10)

    for soltab in openSoltabs(H, soltabs):

        logging.info("Smoothing soltab: " + soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab)

        if sf.getType() != 'clock':
            logging.error(
                'Only clock-type solutions can be run in this operation.')
            return 1

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        sf.setSelection(**userSel)

        for vals, weights, coord, selection in sf.getValuesIter(
                returnAxes='time', weight=True):

            x = coord['time'][weights != 0]
            y = vals[weights != 0]
            weights = weights[weights != 0]
            spline = scipy.interpolate.UnivariateSpline(x,
                                                        y,
                                                        weights,
                                                        k=1,
                                                        s=1e-15)

            plot = True
            if plot:
                import matplotlib as mpl
                mpl.use("Agg")
                import matplotlib.pyplot as plt
                plt.plot(x, y, 'k.')
                plt.plot(x, spline(x), 'r-')
                plt.savefig('test.png')
                sys.exit(1)

            sw.selection = selection
            sw.setValues(spline(coord['time']))

        sw.addHistory('Smoothed with SMOOTHCLOCK.')
        del sf
        del sw
    return 0
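
The spline fit is a direct scipy.interpolate.UnivariateSpline call; a self-contained sketch on synthetic clock-like data (same k=1 and tiny smoothing factor):

import numpy as np
import scipy.interpolate

rng = np.random.default_rng(0)
x = np.linspace(0., 600., 200)                                       # fake time axis [s]
y = 1e-8 * np.sin(x / 100.) + 1e-10 * rng.standard_normal(x.size)    # fake clock values
w = np.ones_like(x)                                                  # all points unflagged

spline = scipy.interpolate.UnivariateSpline(x, y, w, k=1, s=1e-15)
smoothed = spline(x)   # the values the operation writes back
print(smoothed[:3])
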
Example #10
def run( step, parset, H ):

    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )

    axesToExt = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "Axes"]), ['freq','time'] )
    size = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "Size"]), [11,11] )
    percent = parset.getFloat('.'.join(["LoSoTo.Steps", step, "Percent"]), 50 )
    cycles = parset.getInt('.'.join(["LoSoTo.Steps", step, "Cycles"]), 3 )
    ncpu = parset.getInt('.'.join(["LoSoTo.Ncpu"]), 0 )
    if ncpu == 0:
        import multiprocessing
        ncpu = multiprocessing.cpu_count()
    
    if axesToExt == []:
        logging.error("Please specify at least one axis to extend flag.")
        return 1

    for soltab in openSoltabs( H, soltabs ):

        # start processes for multi-thread
        mpm = multiprocManager(ncpu, flag)

        logging.info("Extending flag on soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        for axisToExt in axesToExt:
            if axisToExt not in sf.getAxesNames():
                logging.error('Axis \"'+axisToExt+'\" not found.')
                mpm.wait()
                return 1

        # fill the queue (note that sf and sw cannot be put into a queue since they have file references)
        for vals, weights, coord, selection in sf.getValuesIter(returnAxes=axesToExt, weight=True):
            mpm.put([weights, coord, axesToExt, selection, percent, size, cycles])

        mpm.wait()

        logging.info('Writing solutions')
        for w,sel in mpm.get():
            sw.selection = sel
            sw.setValues(w, weight=True) # convert back to np.float16

        sw.addHistory('FLAG EXTENDED (over %s)' % (str(axesToExt)))
        del sf
        del sw
    return 0
Example #11
def run(step, parset, H):

    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)

    axesToExt = parset.getStringVector(
        '.'.join(["LoSoTo.Steps", step, "Axes"]), ['freq', 'time'])
    size = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "Size"]),
                               [11, 11])
    percent = parset.getFloat('.'.join(["LoSoTo.Steps", step, "Percent"]), 50)
    cycles = parset.getInt('.'.join(["LoSoTo.Steps", step, "Cycles"]), 3)
    ncpu = parset.getInt('.'.join(["LoSoTo.Ncpu"]), 1)

    if axesToExt == []:
        logging.error("Please specify at least one axis to extend flag.")
        return 1

    # start processes for multi-thread
    mpm = multiprocManager(ncpu, flag)

    for soltab in openSoltabs(H, soltabs):

        logging.info("Extending flag on soltab: " + soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        sf.setSelection(**userSel)

        for axisToExt in axesToExt:
            if axisToExt not in sf.getAxesNames():
                logging.error('Axis \"' + axisToExt + '\" not found.')
                return 1

        # fill the queue (note that sf and sw cannot be put into a queue since they have file references)
        for vals, weights, coord, selection in sf.getValuesIter(
                returnAxes=axesToExt, weight=True):
            mpm.put(
                [weights, coord, axesToExt, selection, percent, size, cycles])

        mpm.wait()

        logging.info('Writing solutions')
        for w, sel in mpm.get():
            sw.selection = sel
            sw.setValues(w, weight=True)  # convert back to np.float16

        sw.addHistory('FLAG EXTENDED (over %s)' % (str(axesToExt)))
        del sf
        del sw
    return 0
Example #12
def main(msfileslist,
         hdf5fileName,
         hdf5_dir='.',
         instrument='/instrument',
         solsetName=None,
         compression=5):
    tmp_msfiles = msfileslist.lstrip('[').rstrip(']').split(',')
    msfiles = [MS.strip("\' \"") for MS in tmp_msfiles]
    hdf5File = os.path.join(hdf5_dir, hdf5fileName)
    compression = int(compression)  #doesn't hurt if it is already an int
    instrument = instrument.strip()

    # Check if all the necessary files are available
    antennaFile = os.path.join(msfiles[0], 'ANTENNA')
    if not os.path.isdir(antennaFile):
        logging.critical('Missing ANTENNA table.')
        sys.exit(1)
    fieldFile = os.path.join(msfiles[0], 'FIELD')
    if not os.path.isdir(fieldFile):
        logging.critical('Missing FIELD table.')
        sys.exit(1)
    skydbFile = os.path.join(msfiles[0], 'sky')
    if not os.path.isdir(skydbFile):
        logging.warning(
            'No sky table found. (Direction-dependent parameters will not work.)'
        )
        skydbFile = None

    #generate list of parmDB-filenames
    parmDBnames = [MS + instrument for MS in msfiles]

    #create and fill the hdf5-file:
    solset = parmDBs2h5parm(hdf5File,
                            parmDBnames,
                            antennaFile,
                            fieldFile,
                            skydbFile,
                            compression=compression,
                            solsetName=solsetName)

    # Add CREATE entry to history
    h5parmDB = h5parm(hdf5File, readonly=False)
    soltabs = h5parmDB.getSoltabs(solset=solset)
    for st in soltabs:
        sw = solWriter(soltabs[st])
        sw.addHistory('CREATE (by losotoImporter from %s / %s - %s)' %
                      (os.path.abspath(''), os.path.basename(
                          parmDBnames[0]), os.path.basename(parmDBnames[-1])))
    h5parmDB.close()

    result = {}
    result['h5parm'] = hdf5File
    return result
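
msfileslist is expected as a bracketed, comma-separated string; a sketch of the parsing with a hypothetical value:

msfileslist = "['/data/L123_SB000.MS', '/data/L123_SB001.MS']"   # made-up input
tmp_msfiles = msfileslist.lstrip('[').rstrip(']').split(',')
msfiles = [MS.strip("\' \"") for MS in tmp_msfiles]
print(msfiles)   # ['/data/L123_SB000.MS', '/data/L123_SB001.MS']
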
Example #13
def run( step, parset, H ):

    from losoto.h5parm import solFetcher, solWriter
    import numpy as np

    soltabs = getParSoltabs( step, parset, H )

    weightVal = parset.getFloat('.'.join(["LoSoTo.Steps", step, "WeightVal"]), 1. )
    mergeSoltab = parset.getString('.'.join(["LoSoTo.Steps", step, "MergeFromSoltab"]), '' )
    flagBad = parset.getBool('.'.join(["LoSoTo.Steps", step, "FlagBad"]), False )

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Reweighting soltab: "+soltab._v_name)

        sw = solWriter(soltab)

        # axis selection
        userSel = {}
        for axis in sw.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sw.setSelection(**userSel)

        if mergeSoltab != '':
            mss, mst = mergeSoltab.split('/')
            msf = solFetcher(H.getSoltab(mss, mst))
            msf.setSelection(**userSel)
            sf = solFetcher(soltab)
            sf.setSelection(**userSel)
            weights, axes = sf.getValues(weight = True)
            mergeWeights, mergeAxes = msf.getValues(weight = True)
            if axes.keys() != mergeAxes.keys() or weights.shape != mergeWeights.shape:
                logging.error('Cannot merge two tables with different axes values.')
                return 1
            weights[ np.where(mergeWeights == 0) ] = 0.
            sw.addHistory('WEIGHT merged from '+mergeSoltab+' for selection:'+str(userSel))
        else:
            weights = weightVal
            sw.addHistory('REWEIGHTED to '+str(weightVal)+' for selection:'+str(userSel))

        sw.setValues(weights, weight=True)

        if flagBad:
            sf = solFetcher(soltab)
            sf.setSelection(**userSel)
            weights = sf.getValues(weight = True, retAxesVals = False)
            vals = sf.getValues(retAxesVals = False)
            if sf.getType() == 'amplitude': weights[np.where(vals == 1)] = 0
            else: weights[np.where(vals == 0)] = 0
            sw.setValues(weights, weight=True)

    return 0
Example #14
def run( step, parset, H ):

    import scipy.ndimage.filters
    import scipy.interpolate
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )

    s = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "smooth"]), 10 )

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Smoothing soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab)

        if sf.getType() != 'clock':
            logging.error('Only clock-type solutions can be run in this operation.')
            return 1

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        for vals, weights, coord, selection in sf.getValuesIter(returnAxes='time', weight=True):

            x=coord['time'][weights != 0]
            y=vals[weights != 0]
            weights = weights[weights != 0]
            spline = scipy.interpolate.UnivariateSpline(x, y, weights, k=1, s=1e-15)

            plot = True
            if plot:
                import matplotlib as mpl
                mpl.use("Agg")
                import matplotlib.pyplot as plt
                plt.plot(x, y, 'k.')
                plt.plot(x, spline(x), 'r-')
                plt.savefig('test.png')
                sys.exit(1)

            sw.selection = selection
            sw.setValues(spline(coord['time']))

        sw.addHistory('Smoothed with SMOOTHCLOCK.')
        del sf
        del sw
    return 0
Example #15
File: norm.py  Project: twshimwell/losoto
def run(step, parset, H):
    """
    Normalize the solutions to a given value
    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)
    solTypes = getParSolTypes(step, parset, H)

    normVal = parset.getFloat('.'.join(["LoSoTo.Steps", step, "NormVal"]), 1.)
    normAxes = parset.getStringVector(
        '.'.join(["LoSoTo.Steps", step, "NormAxes"]), ['time'])

    for soltab in openSoltabs(H, soltabs):

        logging.info("Normalizing soltab: " + soltab._v_name)

        tr = solFetcher(soltab)
        tw = solWriter(soltab, useCache=True)  # remember to flush!

        axesNames = tr.getAxesNames()
        for normAxis in normAxes:
            if normAxis not in axesNames:
                logging.error('Normalization axis ' + normAxis + ' not found.')
                return 1

        # axis selection
        userSel = {}
        for axis in tr.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        tr.setSelection(**userSel)

        for vals, weights, coord, selection in tr.getValuesIter(
                returnAxes=normAxes, weight=True):

            # rescale solutions
            if np.sum(weights) == 0: continue  # skip flagged antenna
            valsMean = np.average(vals, weights=weights)
            valsNew = normVal * vals / valsMean
            logging.debug(str(coord))
            logging.debug("Rescaling by: " + str(normVal / valsMean))

            # writing back the solutions
            tw.selection = selection
            tw.setValues(valsNew)

        tw.flush()
        tw.addHistory('NORM (on axis %s)' % (normAxes))

    return 0
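
The normalization is a weighted mean followed by a rescale; a standalone NumPy sketch on made-up amplitudes:

import numpy as np

normVal = 1.0
vals = np.array([0.8, 1.1, 1.3, 0.9])          # synthetic amplitudes
weights = np.array([1., 1., 0., 1.])           # third point flagged

valsMean = np.average(vals, weights=weights)   # flagged points do not contribute
valsNew = normVal * vals / valsMean            # what gets written back
print(valsMean, valsNew)
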
Example #16
def run( step, parset, H ):
    """
    Normalize the solutions to a given value
    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter
    
    soltabs = getParSoltabs( step, parset, H )
    solTypes = getParSolTypes( step, parset, H )

    normVal = parset.getFloat('.'.join(["LoSoTo.Steps", step, "NormVal"]), 1. )
    normAxes = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "NormAxes"]), ['time'] )

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Normalizing soltab: "+soltab._v_name)

        tr = solFetcher(soltab)
        tw = solWriter(soltab, useCache = True) # remember to flush!

        axesNames = tr.getAxesNames()
        for normAxis in normAxes:
            if normAxis not in axesNames:
                logging.error('Normalization axis '+normAxis+' not found.')
                return 1

        # axis selection
        userSel = {}
        for axis in tr.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        tr.setSelection(**userSel)

        for vals, weights, coord, selection in tr.getValuesIter(returnAxes=normAxes, weight = True):

            # rescale solutions
            if np.sum(weights) == 0: continue # skip flagged selections
            valsMean = np.average(vals, weights=weights)
            vals[weights != 0] *= normVal/valsMean
            logging.debug(str(coord))
            logging.debug("Rescaling by: "+str(normVal/valsMean))

            # writing back the solutions
            tw.selection = selection
            tw.setValues(vals)

        tw.flush()
        tw.addHistory('NORM (on axis %s)' % (normAxes))

    return 0
Example #17
def main(msfileslist, hdf5fileName, hdf5_dir='.', instrument='/instrument', solsetName=None, compression=5):
    tmp_msfiles = msfileslist.lstrip('[').rstrip(']').split(',')
    msfiles = [ MS.strip("\' \"") for MS in tmp_msfiles]
    hdf5File = os.path.join(hdf5_dir,hdf5fileName)
    compression = int(compression) #doesn't hurt if it is already an int
    instrument = instrument.strip()

    # Check if all the necessary files are available
    antennaFile = os.path.join(msfiles[0],'ANTENNA')
    if not os.path.isdir(antennaFile):
        logging.critical('Missing ANTENNA table.')
        sys.exit(1)
    fieldFile = os.path.join(msfiles[0],'FIELD')
    if not os.path.isdir(fieldFile):
        logging.critical('Missing FIELD table.')
        sys.exit(1)
    skydbFile = os.path.join(msfiles[0],'sky')
    if not os.path.isdir(skydbFile):
        logging.critical('Missing sky table.')
        sys.exit(1)
        
    #generate list of parmDB-filenames
    parmDBnames = [ MS+instrument for MS in msfiles ]

    #create and fill the hdf5-file:
    solset = parmDBs2h5parm(hdf5File, parmDBnames, antennaFile, fieldFile, skydbFile, compression=compression, solsetName=solsetName)

    # Add CREATE entry to history 
    h5parmDB = h5parm(hdf5File, readonly = False)
    soltabs = h5parmDB.getSoltabs(solset=solset)
    for st in soltabs:
        sw = solWriter(soltabs[st])
        sw.addHistory('CREATE (by losotoImporter from %s / %s - %s)' % (os.path.abspath(''), 
                                   os.path.basename(parmDBnames[0]), os.path.basename(parmDBnames[-1]) ) )
    h5parmDB.close()

    result = {}
    result['h5parm'] = hdf5File
    return result
Example #18
File: smooth.py  Project: twshimwell/losoto
def run(step, parset, H):

    import scipy.ndimage.filters
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)

    axesToSmooth = parset.getStringVector(
        '.'.join(["LoSoTo.Steps", step, "Axes"]), [])
    FWHM = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "FWHM"]), [])
    mode = parset.getString('.'.join(["LoSoTo.Steps", step, "Mode"]),
                            "runningmedian")

    if mode == "runningmedian" and len(axesToSmooth) != len(FWHM):
        logging.error("Axes and FWHM lenghts must be equal.")
        return 1

    if mode == "runningmedian":
        logging.warning('Flagged data are still taken into account!')

    if FWHM != [] and mode != "runningmedian":
        logging.warning(
            "FWHM makes sense only with runningmedian mode, ignoring it.")

    for soltab in openSoltabs(H, soltabs):

        logging.info("Smoothing soltab: " + soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab, useCache=True)  # remember to flush!

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        sf.setSelection(**userSel)

        for i, axis in enumerate(axesToSmooth[:]):
            if axis not in sf.getAxesNames():
                del axesToSmooth[i]
                del FWHM[i]
                logging.warning('Axis \"' + axis + '\" not found. Ignoring.')

        for vals, weights, coord, selection in sf.getValuesIter(
                returnAxes=axesToSmooth, weight=True):

            if mode == 'runningmedian':
                valsnew = scipy.ndimage.filters.median_filter(vals, FWHM)
            elif mode == 'median':
                valsnew = np.median(vals[(weights != 0)])
            elif mode == 'mean':
                valsnew = np.mean(vals[(weights != 0)])
            else:
                logging.error('Mode must be: runningmedian, median or mean')
                return 1

            sw.selection = selection
            sw.setValues(valsnew)

        sw.flush()
        sw.addHistory('SMOOTH (over %s with mode = %s)' % (axesToSmooth, mode))
        del sf
        del sw
    return 0
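
In runningmedian mode the work is done by SciPy's median filter (scipy.ndimage.filters is the legacy module path). A minimal sketch on a synthetic 2-D freq/time block:

import numpy as np
import scipy.ndimage

rng = np.random.default_rng(1)
vals = rng.normal(1.0, 0.1, size=(8, 50))        # fake (freq, time) amplitudes
FWHM = [3, 9]                                    # filter window per axis

valsnew = scipy.ndimage.median_filter(vals, FWHM)
print(vals.shape, valsnew.shape)
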
Example #19
def run( step, parset, H ):

    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )

    #check_parset('Axes','MaxCycles','MaxRms','Order','Replace','PreFlagZeros')
    axesToFlag = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "Axes"]), 'time' )
    maxCycles = parset.getInt('.'.join(["LoSoTo.Steps", step, "MaxCycles"]), 5 )
    maxRms = parset.getFloat('.'.join(["LoSoTo.Steps", step, "MaxRms"]), 5. )
    fixRms = parset.getFloat('.'.join(["LoSoTo.Steps", step, "FixRms"]), 0 )
    order = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "Order"]), 3 )
    replace = parset.getBool('.'.join(["LoSoTo.Steps", step, "Replace"]), False )
    preflagzeros = parset.getBool('.'.join(["LoSoTo.Steps", step, "PreFlagZeros"]), False )
    mode = parset.getString('.'.join(["LoSoTo.Steps", step, "Mode"]), 'smooth' )
    ref = parset.getString('.'.join(["LoSoTo.Steps", step, "Reference"]), '' )
    ncpu = parset.getInt('.'.join(["LoSoTo.Ncpu"]), 1 )

    if ref == '': ref = None

    if axesToFlag == []:
        logging.error("Please specify axis to flag. It must be a single one.")
        return 1

    if len(axesToFlag) != len(order):
        logging.error("Axes and Order must have the same length (1 or 2 values).")
        return 1

    if len(order) == 1: order = order[0]
    elif len(order) == 2: order = tuple(order)

    mode = mode.lower()
    if mode != 'smooth' and mode != 'poly' and mode != 'spline':
        logging.error('Mode must be smooth, poly or spline')
        return 1

    for soltab in openSoltabs( H, soltabs ):

        # start processes for multi-thread
        mpm = multiprocManager(ncpu, flag)

        logging.info("Flagging soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab, useCache=True) # remember to flush!

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        for axisToFlag in axesToFlag:
            if axisToFlag not in sf.getAxesNames():
                logging.error('Axis \"'+axisToFlag+'\" not found.')
                mpm.wait()
                return 1

        # reorder axesToFlag as axes in the table
        axesToFlag_orig = axesToFlag
        axesToFlag = [coord for coord in sf.getAxesNames() if coord in axesToFlag]
        if type(order) is int: order = [order]
        if axesToFlag_orig != axesToFlag: order = order[::-1] # reverse order if we changed axesToFlag

        solType = sf.getType()

        # fill the queue (note that sf and sw cannot be put into a queue since they have file references)
        for vals, weights, coord, selection in sf.getValuesIter(returnAxes=axesToFlag, weight=True, reference=ref):
            mpm.put([vals, weights, coord, solType, order, mode, preflagzeros, maxCycles, fixRms, maxRms, replace, axesToFlag, selection])
            #v, w, sel = flag(vals, weights, coord, solType, order, mode, preflagzeros, maxCycles, fixRms, maxRms, replace, axesToFlag, selection)

        mpm.wait()
        
        for v, w, sel in mpm.get():
            sw.selection = sel
            if replace:
                # rewrite solutions (flagged values are overwritten)
                sw.setValues(v, weight=False)
            else:
                sw.setValues(w, weight=True)
        
        sw.flush()
        sw.addHistory('FLAG (over %s with %s sigma cut)' % (axesToFlag, maxRms))

        del sw
        del sf
        del soltab

    return 0
Example #20
elapsed = (time.clock() - start)
logging.info("PARMDB -- " + str(elapsed) + " s.")

start = time.clock()
for i in xrange(n):
    H.setSelection(dir='pointing', ant='RS*')
    Hrot = H.getValues(retAxesVals=False)
elapsed = (time.clock() - start)
logging.info("H5parm -- " + str(elapsed) + " s.")

#print "Equal?", (Prot == np.squeeze(Hrot)).all()

######################################################
# read+write
logging.info("### Read all rotations for a dir/station and write them back")
Hw = solWriter(H5.getSoltab(solset, 'amplitude000'))

start = time.clock()
for i in xrange(n):
    Prot = P.getValuesGrid('CommonRotationAngle:CS001LBA')
    Prot = {'test' + str(i): Prot['CommonRotationAngle:CS001LBA']}
    P2.addValues(Prot)
    # parmdb write?
elapsed = (time.clock() - start)
logging.info("PARMDB -- " + str(elapsed) + " s.")

start = time.clock()
for i in xrange(n):
    H.setSelection(dir='pointing', ant='CS001LBA')
    Hrot = H.getValues(retAxesVals=False)
    Hw.setSelection(dir='pointing', ant='CS001LBA')
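
time.clock and xrange date this benchmark to Python 2; time.clock was removed in Python 3.8. The same timing pattern with time.perf_counter (a sketch, with a dummy workload in place of the H5parm read):

import time

start = time.perf_counter()
for i in range(1000):
    sum(range(100))          # stand-in for the H5parm read
elapsed = time.perf_counter() - start
print("H5parm -- " + str(elapsed) + " s.")
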
Example #21
def run(step, parset, H):
    """
    Generic unspecified step for easy expansion.
    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter
    # all the following are LoSoTo functions to extract information from the parset

    # get involved solsets using local step values or global values or all
    solsets = getParSolsets(step, parset, H)
    logging.info('Solset: ' + str(solsets))
    # get involved soltabs using local step values or global values or all
    soltabs = getParSoltabs(step, parset, H)
    logging.info('Soltab: ' + str(soltabs))
    # get list of SolTypes using local step values or global values or all
    solTypes = getParSolTypes(step, parset, H)
    logging.info('SolType: ' + str(solTypes))

    # do something on every soltab (use the openSoltab LoSoTo function)
    for soltab in openSoltabs(H, soltabs):
        logging.info("--> Working on soltab: " + soltab._v_name)
        # use the solFetcher from the H5parm lib
        t = solFetcher(soltab)
        tw = solWriter(soltab)

        axisNames = t.getAxesNames()
        logging.info("Axis names are: " + str(axisNames))

        solType = t.getType()
        logging.info("Soltab type is: " + solType)

        # this will make a selection for the getValues() and getValuesIter()
        # interpret every entry in the parset which has an axis name as a selector
        userSel = {}
        for axis in t.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        t.setSelection(**userSel)

        # an explicit selection can also be passed directly, e.g.:
        # t.setSelection(ant='CS001LBA', pol='XX', dir='pointing')
        logging.info("Selection is: " + str(t.selection))

        # find axis values
        logging.info("Antennas (no selection) are: " +
                     str(t.getAxisValues('ant', ignoreSelection=True)))
        logging.info("Antennas (with selection) are: " +
                     str(t.getAxisValues('ant')))
        # but one can also use (selection is active here!)
        logging.info("Antennas (other method) are: " + str(t.ant))
        logging.info("Frequencies are: " + str(t.freq))
        logging.info("Directions are: " + str(t.dir))
        logging.info("Polarizations are: " + str(t.pol))
        # try to access a non-existent axis
        t.getAxisValues('nonexistantaxis')

        # now get all values given this selection
        logging.info("Get data using t.val")
        val = t.val
        logging.debug('shape of val: ' + str(t.val.shape))
        logging.info("$ val is " + str(val[0, 0, 0, 0, 100]))
        weight = t.weight
        time = t.time
        thisTime = t.time[100]

        # another way to get the data is using the getValues()
        logging.info("Get data using getValues()")
        grid, axes = t.getValues()
        # axis names
        logging.info("Axes: " + str(t.getAxesNames()))
        # axis shape
        print axes
        print [t.getAxisLen(axis) for axis in axes]  # axes is a dict, so not ordered
        # data array shape (same of axis shape)
        logging.info("Shape of values: " + str(grid.shape))
        #logging.info("$ val is "+str(grid[0,0,0,0,100]))

        # reset selection
        t.setSelection()
        logging.info('Reset selection to \'\'')
        logging.info("Antennas are: " + str(t.ant))
        logging.info("Frequencies are: " + str(t.freq))
        logging.info("Directions are: " + str(t.dir))
        logging.info("Polarizations are: " + str(t.pol))

        # finally, getValuesIter allows iterating across all possible combinations of a set of axes
        logging.info('Iteration on time/freq')
        for vals, coord, selection in t.getValuesIter(
                returnAxes=['time', 'freq']):
            # writing back the solutions
            tw.selection = selection
            tw.setValues(vals)
        logging.info('Iteration on time')
        for vals, coord, selection in t.getValuesIter(returnAxes=['time']):
            # writing back the solutions
            tw.selection = selection
            tw.setValues(vals)
        logging.info('Iteration on dir after selection to 1 dir')
        t.setSelection(dir='pointing')
        for vals, coord, selection in t.getValuesIter(returnAxes=['dir']):
            # writing back the solutions
            tw.selection = selection
            tw.setValues(vals)

    return 0  # if everything went fine, otherwise 1
Example #22
def run( step, parset, H ):

    import scipy.ndimage.filters
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter
    from scipy.optimize import minimize
    import itertools
    from scipy.interpolate import griddata
    import scipy.cluster.vq as vq

    def robust_std(data, sigma=3):
        """
        Calculate standard deviation excluding outliers
        ok with masked arrays
        """
        return np.std(data[np.where(np.abs(data) < sigma * np.std(data))])

    def mask_interp(vals, mask, method='nearest'):
        """
        return interpolated values for masked elements
        """
        this_vals = vals.copy()
        #this_vals[mask] = np.interp(np.where(mask)[0], np.where(~mask)[0], vals[~mask])
        #this_vals[mask] = griddata(np.where(~mask)[0], vals[~mask], np.where(mask)[0], method)

        # griddata has nan bug with nearest, I need to use vq
        code, dist = vq.vq(np.where(mask)[0], np.where(~mask)[0])
        this_vals[ np.where(mask)[0] ] = this_vals[code]
        
        return this_vals

    tec_jump_val = 0.019628
    maxsize = 300
    clip = 10 # TEC values more than this many jumps away are flagged

    soltabs = getParSoltabs( step, parset, H )

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Removing TEC jumps from soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab) # remember to flush!

        # TODO: check if it's a Tec table

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        for vals, weights, coord, selection in sf.getValuesIter(returnAxes='time', weight=True):

            # skip all flagged
            if (weights == 0).all(): continue
            # skip reference
            if (np.diff(vals[(weights == 1)]) == 0).all(): continue

            # kill large values
#            weights[abs(vals/tec_jump_val)>clip] = 0

            # interpolate flagged values to get reasonable distances
#            vals = mask_interp(vals, mask=(weights == 0))/tec_jump_val
            # add edges to allow intervals to the borders
#            vals = np.insert(vals, 0, vals[0])
#            vals = np.insert(vals, len(vals), vals[-1])

            vals = np.fmod(vals,tec_jump_val)

#            def find_jumps(d_vals):
#                # jump position finder
#                d_smooth = scipy.ndimage.filters.median_filter( mask_interp(d_vals, mask=(abs(d_vals)>0.8)), 21 )
#                d_vals -= d_smooth
#                jumps = list(np.where(np.abs(d_vals) > 1.)[0])
#                return [0]+jumps+[len(d_vals)-1] # add edges
#
#            class Jump(object):
#                def __init__(self, jumps_idx, med):
#                    self.idx_left = jumps_idx[0]
#                    self.idx_right = jumps_idx[1]
#                    self.jump_left = np.rint(d_vals[self.idx_left])
#                    self.jump_right = np.rint(d_vals[self.idx_right])
#                    self.size = self.idx_right-self.idx_left
#                    self.hight = np.median(vals[self.idx_left+1:self.idx_right+1]-med)
#                    if abs((self.hight-self.jump_left)-med) > abs((self.hight-self.jump_right)-med):
#                        self.closejump = self.jump_right
#                    else:
#                        self.closejump = self.jump_left
#    
#            i = 0
#            while i<len(coord['time']):
#                # get tec[i] - tec[i+1], i.e. the derivative assuming constant timesteps
#                # this is in units of tec_jump_val!
#                d_vals = np.diff(vals)
#                # get jumps idx, idx=n means a jump between val n and n+1
#                jumps_idx = find_jumps(d_vals)
#
#                # get regions
#                med = np.median(vals)
#                jumps = [Jump(jump_idx, med) for jump_idx in zip( jumps_idx[:-1], jumps_idx[1:] )]
#                jumps = [jump for jump in jumps if jump.closejump != 0]
#                jumps = [jump for jump in jumps if jump.size != 0] # prevent bug on edges
#                jumps = [jump for jump in jumps if jump.size < maxsize]
#
#                jumps.sort(key=lambda x: (np.abs(x.size), x.hight), reverse=False) #smallest first
#                #print [(j.hight, j.closejump) for j in jumps]
#
#                plot = False
#                if plot:
#                    import matplotlib.pyplot as plt
#                    fig, ((ax1, ax2, ax3)) = plt.subplots(3, 1, sharex=True)
#                    fig.subplots_adjust(hspace=0)
#                    d_smooth = scipy.ndimage.filters.median_filter( mask_interp(d_vals, mask=(abs(d_vals)>0.8)), 31 )
#                    ax1.plot(d_vals,'k-')
#                    ax2.plot(d_smooth,'k-')
#                    ax3.plot(vals, 'k-')
#                    [ax3.axvline(jump_idx+0.5, color='r', ls=':') for jump_idx in jumps_idx]
#                    ax1.set_ylabel('d_vals')
#                    ax2.set_ylabel('d_vals - smooth')
#                    ax3.set_ylabel('TEC/jump')
#                    ax3.set_xlabel('timestep')
#                    ax1.set_xlim(xmin=-10, xmax=len(d_smooth)+10)
#                    fig.savefig('plots/%stecjump_debug_%03i' % (coord['ant'], i))
#                i+=1
#
#                if len(jumps) == 0: 
#                    break
#
#                # move down the highest to the side closest to the median
#                j = jumps[0]
#                #print j.idx_left, j.idx_right, j.jump_left, j.jump_right, j.hight, j.closejump
#
#                vals[j.idx_left+1:j.idx_right+1] -= j.closejump
#                logging.debug("%s: Number of jumps left: %i - Removing jump: %i - Size %i" % (coord['ant'], len(jumps_idx)-2, j.closejump, j.size))
                
            # re-create proper vals
#            vals = vals[1:-1]*tec_jump_val
            # set back to 0 the values for flagged data
            vals[weights == 0] = 0

            sw.selection = selection
            sw.setValues(vals)
            sw.setValues(weights, weight=True)

        sw.addHistory('TECJUMP')
        del sf
        del sw
    return 0
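
With most of the jump-finding machinery commented out, the active part of the loop reduces the TEC values modulo the jump size; a standalone sketch of what np.fmod does to jump-contaminated values (numbers are made up):

import numpy as np

tec_jump_val = 0.019628
vals = np.array([0.001, 0.001 + tec_jump_val, 0.002 - 2 * tec_jump_val])   # synthetic TEC with jumps
print(np.fmod(vals, tec_jump_val))   # every value folded back to within one jump of zero
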
Example #23
                    ra = skydb.getDefValues('Ra:' + source)['Ra:' + source][0][0]
                    dec = skydb.getDefValues('Dec:' + source)['Dec:' + source][0][0]
                except KeyError:
                    # Source not found in skymodel parmdb, try to find components
                    logging.warning('Cannot find the source '+source+'. Trying components.')
                    ra = np.array(skydb.getDefValues('Ra:*' + source + '*').values())
                    dec = np.array(skydb.getDefValues('Dec:*' + source + '*').values())
                    if len(ra) == 0 or len(dec) == 0:
                        ra = np.nan
                        dec = np.nan
                        logging.error('Cannot find the source '+source+'. I leave NaNs.')
                    else:
                        ra = ra.mean()
                        dec = dec.mean()
                        logging.info('Found average direction for '+source+' at ra:'+str(ra)+' - dec:'+str(dec))
                vals.append([ra, dec])
        sourceTable.append(zip(*(dirs,vals)))

    logging.info("Total file size: "+str(int(h5parm.H.get_filesize()/1024./1024.))+" M.")

    # Add CREATE entry to history and print summary of tables if verbose
    soltabs = h5parm.getSoltabs(solset=solset)
    for st in soltabs:
        sw = solWriter(soltabs[st])
        sw.addHistory('CREATE (by H5parm_importer.py from %s:%s/%s)' % (socket.gethostname(), os.path.abspath(''), globaldbFile))
    if options.verbose:
        logging.info(str(h5parm))

    del h5parm
    logging.info('Done.')
Example #24
def run( step, parset, H ):
    """
    Separate phase solutions into Clock and TEC.

    Phase solutions are assumed to be stored in solsets of the H5parm file, one
    solset per field.

    The Clock and TEC values are stored in the specified output soltab with type 'clock' and 'tec'.

    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    # get involved solsets using local step values or global values or all
    soltabs = getParSoltabs( step, parset, H )

    flagBadChannels = parset.getBool('.'.join(["LoSoTo.Steps", step, "FlagBadChannels"]), True )
    flagCut = parset.getFloat('.'.join(["LoSoTo.Steps", step, "FlagCut"]), 5. )
    chi2cut = parset.getFloat('.'.join(["LoSoTo.Steps", step, "Chi2cut"]), 3000. )
    combinePol = parset.getBool('.'.join(["LoSoTo.Steps", step, "CombinePol"]), False )
    #fitOffset = parset.getBool('.'.join(["LoSoTo.Steps", step, "FitOffset"]), False )
    removePhaseWraps=parset.getBool('.'.join(["LoSoTo.Steps", step, "RemovePhaseWraps"]), True )
    fit3rdorder=parset.getBool('.'.join(["LoSoTo.Steps", step, "Fit3rdOrder"]), False )
    circular=parset.getBool('.'.join(["LoSoTo.Steps", step, "Circular"]), False )
    reverse=parset.getBool('.'.join(["LoSoTo.Steps", step, "Reverse"]), False )

    # do something on every soltab (use the openSoltab LoSoTo function)
    #for soltab in openSoltabs( H, soltabs ):
    for soltabname in soltabs:
        solsetname=soltabname.split('/')[0]
        soltab=H.getSoltab(solset=solsetname, soltab=soltabname.split('/')[1])
        logging.info("--> Working on soltab: "+soltab._v_name)
        t = solFetcher(soltab)
        tw = solWriter(soltab)

        # some checks
        solType = t.getType()
        if solType != 'phase':
           logging.warning("Soltab type of "+soltab._v_name+" is: "+solType+" should be phase. Ignoring.")
           continue

        # this will make a selection for the getValues() and getValuesIter()
        userSel = {}
        for axis in t.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        t.setSelection(**userSel)

        # Collect station properties
        station_dict = H.getAnt(solsetname)
        stations = t.getAxisValues('ant')
        station_positions = np.zeros((len(stations), 3), dtype=np.float)
        for i, station_name in enumerate(stations):
            station_positions[i, 0] = station_dict[station_name][0]
            station_positions[i, 1] = station_dict[station_name][1]
            station_positions[i, 2] = station_dict[station_name][2]
            
        returnAxes=['ant','freq','pol','time']
        for vals, flags, coord, selection in t.getValuesIter(returnAxes=returnAxes,weight=True):

            if len(coord['ant']) < 2:
                logging.error('Clock/TEC separation needs at least 2 antennas selected.')
                return 1
            if len(coord['freq']) < 10:
                logging.error('Clock/TEC separation needs at least 10 frequency channels, preferably distributed over a wide range')
                return 1

            freqs=coord['freq']
            stations=coord['ant']
            times=coord['time']

            # get axes index
            axes=[i for i in t.getAxesNames() if i in returnAxes]

            # reverse time axes
            if reverse: 
                vals = np.swapaxes(np.swapaxes(vals, 0, axes.index('time'))[::-1], 0, axes.index('time'))
                flags = np.swapaxes(np.swapaxes(flags, 0, axes.index('time'))[::-1], 0, axes.index('time'))

            result=doFit(vals,flags==0,freqs,stations,station_positions,axes,\
                             flagBadChannels=flagBadChannels,flagcut=flagCut,chi2cut=chi2cut,combine_pol=combinePol,removePhaseWraps=removePhaseWraps,fit3rdorder=fit3rdorder,circular=circular)
            if fit3rdorder:
                clock,tec,offset,tec3rd=result
                if reverse: 
                    clock = clock[::-1,:]
                    tec = tec[::-1,:]
                    tec3rd = tec3rd[::-1,:]
            else:
                clock,tec,offset=result
                if reverse: 
                    clock = clock[::-1,:]
                    tec = tec[::-1,:]

            weights=tec>-5
            tec[np.logical_not(weights)]=0
            clock[np.logical_not(weights)]=0
            weights=np.float16(weights)

            if combinePol:
                tf_st = H.makeSoltab(solsetname, 'tec',
                                 axesNames=['time', 'ant'], axesVals=[times, stations],
                                 vals=tec[:,:,0],
                                 weights=weights[:,:,0])
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname, 'clock',
                                 axesNames=['time', 'ant'], axesVals=[times, stations],
                                 vals=clock[:,:,0]*1e-9,
                                 weights=weights[:,:,0])
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname, 'phase_offset',
                                 axesNames=['ant'], axesVals=[stations],
                                 vals=offset[:,0],
                                 weights=np.ones_like(offset[:,0],dtype=np.float16))
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                if fit3rdorder:
                    tf_st = H.makeSoltab(solsetname, 'tec3rd',
                                         axesNames=['time', 'ant'], axesVals=[times, stations],
                                         vals=tec3rd[:,:,0],
                                         weights=weights[:,:,0])
                    sw = solWriter(tf_st)
            else:
                tf_st = H.makeSoltab(solsetname, 'tec',
                                 axesNames=['time', 'ant','pol'], axesVals=[times, stations, ['XX','YY']],
                                 vals=tec,
                                 weights=weights)
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname, 'clock',
                                 axesNames=['time', 'ant','pol'], axesVals=[times, stations, ['XX','YY']],
                                 vals=clock*1e-9,
                                 weights=weights)
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname, 'phase_offset',
                                 axesNames=['ant','pol'], axesVals=[stations, ['XX','YY']],
                                 vals=offset,
                                 weights=np.ones_like(offset,dtype=np.float16))
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                if fit3rdorder:
                    tf_st = H.makeSoltab(solsetname, 'tec3rd',
                                         axesNames=['time', 'ant','pol'], axesVals=[times, stations, ['XX','YY']],
                                         vals=tec3rd,
                                         weights=weights)
                    sw = solWriter(tf_st)
    return 0
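
For orientation only: the model that doFit solves for is, roughly, a clock delay term linear in frequency plus a dispersive ionospheric term proportional to 1/frequency, with a per-antenna phase offset. A rough sketch of that forward model (the dispersive constant is approximate and this is not the doFit implementation):

import numpy as np

nu = np.linspace(120e6, 160e6, 10)          # hypothetical frequencies [Hz]
clock, dTEC, offset = 20e-9, 0.05, 0.3      # made-up clock [s], dTEC [TECU], offset [rad]
phase = 2 * np.pi * nu * clock - 8.44e9 * dTEC / nu + offset
print(phase)
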
Example #25
def create_h5parm(instrumentdbFiles, antennaFile, fieldFile, skydbFile,
                  h5parmFile, complevel, solsetName, globaldbFile=None, verbose=False):
    """
    Create the h5parm file.
    Input:
       instrumentdbFiles - list of the filenames of the solutions.
       antennaFile - file name of the antenna table.
       fieldFile - file name of the field table.
       skydbFile - file name of the sky table.
       h5parmFile - file name of the h5parm file that will be created.
       complevel - level of compression. It is usually 5.
       solsetName - Name of the solution set. Usually "sol###".
       globaldbFile (optional) - Name of the globaldbFile. Used only for 
         logging purposes.
    """
    
    # open/create the h5parm file and the solution-set
    h5parm = h5parm2(h5parmFile, readonly = False, complevel = complevel)
    
    solset = h5parm.makeSolset(solsetName)
    
    # Create tables using the first instrumentdb
    # TODO: all the instrument tables should be checked
    pdb = lofar.parmdb.parmdb(instrumentdbFiles[0])

    solTypes = list(set(x[0] for x in  (x.split(":") for x in pdb.getNames())))
    logging.info('Found solution types: '+', '.join(solTypes))

    # rewrite solTypes in order to put together
    # Gain <-> DirectionalGain
    # CommonRotationAngle <-> RotationAngle
    # CommonScalarPhase <-> ScalarPhase
    # CommonScalarAmplitude <-> ScalarAmplitude
    # it also separates Real/Imag/Ampl/Phase into different solTypes
    if "Gain" in solTypes:
        solTypes.remove('Gain')
        solTypes.append('*Gain:*:Real')
        solTypes.append('*Gain:*:Imag')
        solTypes.append('*Gain:*:Ampl')
        solTypes.append('*Gain:*:Phase')
    if "DirectionalGain" in solTypes:
        solTypes.remove('DirectionalGain')
        solTypes.append('*Gain:*:Real')
        solTypes.append('*Gain:*:Imag')
        solTypes.append('*Gain:*:Ampl')
        solTypes.append('*Gain:*:Phase')
    if "RotationAngle" in solTypes:
        solTypes.remove('RotationAngle')
        solTypes.append('*RotationAngle')
    if "CommonRotationAngle" in solTypes:
        solTypes.remove('CommonRotationAngle')
        solTypes.append('*RotationAngle')
    if "RotationMeasure" in solTypes:
        solTypes.remove('RotationMeasure')
        solTypes.append('*RotationMeasure')
    if "ScalarPhase" in solTypes:
        solTypes.remove('ScalarPhase')
        solTypes.append('*ScalarPhase')
    if "CommonScalarPhase" in solTypes:
        solTypes.remove('CommonScalarPhase')
        solTypes.append('*ScalarPhase')
    if "CommonScalarAmplitude" in solTypes:
        solTypes.remove('CommonScalarAmplitude')
        solTypes.append('*ScalarAmplitude')
    solTypes = list(set(solTypes))

    # every soltype creates a different solution-table
    for solType in solTypes:

        # skip missing solTypes (not all parmdbs have e.g. TEC)
        if len(pdb.getNames(solType+':*')) == 0: continue

        pols = set(); dirs = set(); ants = set();
        freqs = set(); times = set(); ptype = set()

        logging.info('Reading '+solType+'.')

        pbar = progressbar.ProgressBar(maxval=len(instrumentdbFiles)).start()
        ipbar = 0

        for instrumentdbFile in sorted(instrumentdbFiles):

            pdb = lofar.parmdb.parmdb(instrumentdbFile)

            # create the axes grid, necessary if not all entries have the same axes length
            data = pdb.getValuesGrid(solType+':*')

            # check good instrument table
            if len(data) == 0:
                logging.error('Instrument table %s is empty, ignoring.' % instrumentdbFile)

            for solEntry in data:

                pol, dir, ant, parm = parmdbToAxes(solEntry)
                if pol != None: pols |= set([pol])
                if dir != None: dirs |= set([dir])
                if ant != None: ants |= set([ant])
                freqs |= set(data[solEntry]['freqs'])
                times |= set(data[solEntry]['times'])
                pbar.update(ipbar)
            ipbar += 1

        pbar.finish()

        pols = np.sort(list(pols))
        dirs = np.sort(list(dirs))
        ants = np.sort(list(ants))
        freqs = np.sort(list(freqs))
        times = np.sort(list(times))
        shape = [i for i in (len(pols), len(dirs), len(ants), len(freqs), len(times)) if i != 0]
        vals = np.empty(shape)
        vals[:] = np.nan
        weights = np.zeros(shape, dtype=np.float16)

        logging.info('Filling table.')
        pbar = progressbar.ProgressBar(maxval=len(instrumentdbFiles)).start()
        ipbar = 0

        for instrumentdbFile in instrumentdbFiles:

            pdb = lofar.parmdb.parmdb(instrumentdbFile)

            # fill the values
            data = pdb.getValuesGrid(solType+':*')
            if 'Real' in solType: dataIm = pdb.getValuesGrid(solType.replace('Real','Imag')+':*')
            if 'Imag' in solType: dataRe = pdb.getValuesGrid(solType.replace('Imag','Real')+':*')
            for solEntry in data:

                pol, dir, ant, parm = parmdbToAxes(solEntry)
                ptype |= set([solEntry.split(':')[0]]) # original parmdb solution type

                freq = data[solEntry]['freqs']
                time = data[solEntry]['times']

                val = data[solEntry]['values']

                # convert Real and Imag in Amp and Phase respectively
                if parm == 'Real':
                    solEntryIm = solEntry.replace('Real','Imag')
                    valI = dataIm[solEntryIm]['values']
                    val = np.sqrt((val**2)+(valI**2))
                if parm == 'Imag':
                    solEntryRe = solEntry.replace('Imag','Real')
                    valR = dataRe[solEntryRe]['values']
                    val = np.arctan2(val, valR)

                coords = []
                if pol is not None:
                    polCoord = np.searchsorted(pols, pol)
                    coords.append(polCoord)
                if dir is not None:
                    dirCoord = np.searchsorted(dirs, dir)
                    coords.append(dirCoord)
                if ant is not None:
                    antCoord = np.searchsorted(ants, ant)
                    coords.append(antCoord)
                freqCoord = np.searchsorted(freqs, freq)
                timeCoord = np.searchsorted(times, time)
                vals[tuple(coords)][np.ix_(freqCoord,timeCoord)] = val.T
                weights[tuple(coords)][np.ix_(freqCoord,timeCoord)] = 1
                pbar.update(ipbar)
            ipbar += 1

        np.putmask(vals, ~np.isfinite(vals), 0) # put inf and nans to 0
        #vals = np.nan_to_num(vals) # replace nans with 0 (flagged later)

        pbar.finish()
        if solType == '*RotationAngle':
            np.putmask(weights, vals == 0., 0) # flag where val=0
            h5parm.makeSoltab(solset, 'rotation', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*RotationMeasure':
            np.putmask(weights, vals == 0., 0) # flag where val=0
            h5parm.makeSoltab(solset, 'rotationmeasure', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*ScalarPhase':
            np.putmask(weights, vals == 0., 0)
            h5parm.makeSoltab(solset, 'scalarphase', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*ScalarAmplitude':
            np.putmask(weights, vals == 0., 0)
            h5parm.makeSoltab(solset, 'scalaramplitude', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'Clock':
            np.putmask(weights, vals == 0., 0)
            # clock may be diag or scalar
            if len(pols) == 0:
                h5parm.makeSoltab(solset, 'clock', axesNames=['ant','freq','time'], \
                    axesVals=[ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
            else:
                h5parm.makeSoltab(solset, 'clock', axesNames=['pol','ant','freq','time'], \
                    axesVals=[pols,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'TEC':
            np.putmask(weights, vals == 0., 0)
            # tec may be diag or scalar
            if len(pols) == 0:
                h5parm.makeSoltab(solset, 'tec', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
            else:
                h5parm.makeSoltab(solset, 'tec', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Real' or solType == '*Gain:*:Ampl':
            np.putmask(vals, vals == 0, 1) # nans were put to 0 before, set them to 1
            np.putmask(weights, vals == 1., 0) # flag where val=1
            h5parm.makeSoltab(solset, 'amplitude', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Imag' or solType == '*Gain:*:Phase':
            np.putmask(weights, vals == 0., 0) # flag where val=0
            h5parm.makeSoltab(solset, 'phase', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))

        logging.info('Flagged data: %.3f%%' % (100.*(len(weights.flat)-np.count_nonzero(weights))/len(weights.flat)))

    logging.info('Collecting information from the ANTENNA table.')
    antennaTable = pt.table(antennaFile, ack=False)
    antennaNames = antennaTable.getcol('NAME')
    antennaPositions = antennaTable.getcol('POSITION')
    antennaTable.close()
    antennaTable = solset._f_get_child('antenna')
    antennaTable.append(zip(*(antennaNames,antennaPositions)))

    logging.info('Collecting information from the FIELD table.')
    fieldTable = pt.table(fieldFile, ack=False)
    phaseDir = fieldTable.getcol('PHASE_DIR')
    pointing = phaseDir[0, 0, :]
    fieldTable.close()

    sourceTable = solset._f_get_child('source')
    # add the field centre, that is also the direction for Gain and CommonRotationAngle
    sourceTable.append([('pointing',pointing)])

    dirs = []
    for tab in solset._v_children:
        c = solset._f_get_child(tab)
        if c._v_name != 'antenna' and c._v_name != 'source':
            if c.__contains__('dir'):
                dirs.extend(list(set(c.dir)))
    # remove duplicates
    dirs = list(set(dirs))
    # remove any pointing (already in the table)
    if 'pointing' in dirs:
        dirs.remove('pointing')

    if not os.path.isdir(skydbFile) and dirs!=[]:
        logging.critical('Missing skydb table.')
        sys.exit(1)

    if dirs != []:
        logging.info('Collecting information from the sky table.')
        sourceFile = skydbFile + '/SOURCES'
        src_table = pt.table(sourceFile, ack=False)
        sub_tables = src_table.getsubtables()
        vals = []
        ra = dec = np.nan
        has_patches_subtable = False
        for sub_table in sub_tables:
            if 'PATCHES' in sub_table:
                has_patches_subtable = True
        if has_patches_subtable:
            # Read values from PATCHES subtable
            src_table.close()
            sourceFile = skydbFile + '/SOURCES/PATCHES'
            src_table = pt.table(sourceFile, ack=False)
            patch_names = src_table.getcol('PATCHNAME')
            patch_ras = src_table.getcol('RA')
            patch_decs = src_table.getcol('DEC')
            for source in dirs:
                try:
                    patch_indx = patch_names.index(source)
                    ra = patch_ras[patch_indx]
                    dec = patch_decs[patch_indx]
                except ValueError:
                    ra = np.nan
                    dec = np.nan
                    logging.error('Cannot find the source '+source+'. I leave NaNs.')
                vals.append([ra, dec])
            src_table.close()
        else:
            # Try to read default values from parmdb instead
            skydb = lofar.parmdb.parmdb(skydbFile)
            vals = []
            ra = dec = np.nan

            for source in dirs:
                try:
                    ra = skydb.getDefValues('Ra:' + source)['Ra:' + source][0][0]
                    dec = skydb.getDefValues('Dec:' + source)['Dec:' + source][0][0]
                except KeyError:
                    # Source not found in skymodel parmdb, try to find components
                    logging.warning('Cannot find the source '+source+'. Trying components.')
                    ra = np.array(skydb.getDefValues('Ra:*' + source + '*').values())
                    dec = np.array(skydb.getDefValues('Dec:*' + source + '*').values())
                    if len(ra) == 0 or len(dec) == 0:
                        ra = np.nan
                        dec = np.nan
                        logging.error('Cannot find the source '+source+'. I leave NaNs.')
                    else:
                        ra = ra.mean()
                        dec = dec.mean()
                        logging.info('Found average direction for '+source+' at ra:'+str(ra)+' - dec:'+str(dec))
                vals.append([ra, dec])
        sourceTable.append(zip(*(dirs,vals)))

    logging.info("Total file size: "+str(int(h5parm.H.get_filesize()/1024./1024.))+" M.")

    # Add CREATE entry to history and print summary of tables if verbose
    soltabs = h5parm.getSoltabs(solset=solset)
    for st in soltabs:
        sw = solWriter(soltabs[st])
        if globaldbFile is None:
            sw.addHistory('CREATE (by H5parm_importer.py from %s:%s/%s)' % (socket.gethostname(), os.path.abspath(''), "manual list"))
        else:
            sw.addHistory('CREATE (by H5parm_importer.py from %s:%s/%s)' % (socket.gethostname(), os.path.abspath(''), globaldbFile))
    if verbose:
        logging.info(str(h5parm))

    del h5parm
    logging.info('Done.')    
Example #26
def create_h5parm(instrumentdbFiles, antennaFile, fieldFile, skydbFile,
                  h5parmFile, complevel, solsetName, globaldbFile=None, verbose=False):
    """
    Create the h5parm file.
    Input:
       instrumentdbFiles - list of the filenames of the solutions.
       antennaFile - file name of the antenna table.
       fieldFile - file name of the field table.
       skydbFile - file name of the sky table.
       h5parmFile - file name of the h5parm file that will be created.
       complevel - level of compression. It is usually 5.
       solsetName - Name of the solution set. Usually "sol###".
       globaldbFile (optional) - Name of the globaldbFile. Used only for 
         logging purposes.
    """
    
    # open/create the h5parm file and the solution-set
    h5parm = h5parm_mod(h5parmFile, readonly = False, complevel = complevel)
    
    solset = h5parm.makeSolset(solsetName)
    
    # Create tables using the first instrumentdb
    # TODO: all the instrument tables should be checked
    pdb = lofar.parmdb.parmdb(instrumentdbFiles[0])

    solTypes = list(set(x[0] for x in  (x.split(":") for x in pdb.getNames())))
    logging.info('Found solution types: '+', '.join(solTypes))

    # rewrite solTypes in order to put together
    # Gain <-> DirectionalGain
    # CommonRotationAngle <-> RotationAngle
    # CommonScalarPhase <-> ScalarPhase
    # CommonScalarAmplitude <-> ScalarAmplitude
    # it also separates Real/Imag/Ampl/Phase into different solTypes
    if "Gain" in solTypes:
        solTypes.remove('Gain')
        solTypes.append('*Gain:*:Real')
        solTypes.append('*Gain:*:Imag')
        solTypes.append('*Gain:*:Ampl')
        solTypes.append('*Gain:*:Phase')
    if "DirectionalGain" in solTypes:
        solTypes.remove('DirectionalGain')
        solTypes.append('*Gain:*:Real')
        solTypes.append('*Gain:*:Imag')
        solTypes.append('*Gain:*:Ampl')
        solTypes.append('*Gain:*:Phase')
    if "RotationAngle" in solTypes:
        solTypes.remove('RotationAngle')
        solTypes.append('*RotationAngle')
    if "CommonRotationAngle" in solTypes:
        solTypes.remove('CommonRotationAngle')
        solTypes.append('*RotationAngle')
    if "RotationMeasure" in solTypes:
        solTypes.remove('RotationMeasure')
        solTypes.append('*RotationMeasure')
    if "ScalarPhase" in solTypes:
        solTypes.remove('ScalarPhase')
        solTypes.append('*ScalarPhase')
    if "CommonScalarPhase" in solTypes:
        solTypes.remove('CommonScalarPhase')
        solTypes.append('*ScalarPhase')
    if "CommonScalarAmplitude" in solTypes:
        solTypes.remove('CommonScalarAmplitude')
        solTypes.append('*ScalarAmplitude')
    solTypes = list(set(solTypes))

    # every soltype creates a different solution-table
    for solType in solTypes:

        # skip missing solTypes (not all parmdbs have e.g. TEC)
        if len(pdb.getNames(solType+':*')) == 0: continue

        pols = set() 
        dirs = set() 
        ants = set()
        freqs = set() 
        times = set() 
        ptype = set()

        logging.info('Reading '+solType+'.')

        pbar = progressbar.ProgressBar(maxval=len(instrumentdbFiles)).start()
        ipbar = 0

        for instrumentdbFile in sorted(instrumentdbFiles):

            pdb = lofar.parmdb.parmdb(instrumentdbFile)

            # create the axes grid, necessary if not all entries have the same axes length
            data = pdb.getValuesGrid(solType+':*')

            # check good instrument table
            if len(data) == 0:
                logging.error('Instrument table %s is empty, ignoring.' % instrumentdbFile)

            for solEntry in data:

                pol, dir, ant, parm = parmdbToAxes(solEntry)
                if pol is not None: pols |= set([pol])
                if dir is not None: dirs |= set([dir])
                if ant is not None: ants |= set([ant])
                freqs |= set(data[solEntry]['freqs'])
                times |= set(data[solEntry]['times'])
                pbar.update(ipbar)
            ipbar += 1

        pbar.finish()

        pols = np.sort(list(pols)) 
        dirs = np.sort(list(dirs)) 
        ants = np.sort(list(ants)) 
        freqs = np.sort(list(freqs)) 
        times = np.sort(list(times))
        shape = [i for i in (len(pols), len(dirs), len(ants), len(freqs), len(times)) if i != 0]
        vals = np.empty(shape)
        vals[:] = np.nan
        weights = np.zeros(shape, dtype=np.float16)

        logging.info('Filling table.')
        pbar = progressbar.ProgressBar(maxval=len(instrumentdbFiles)).start()
        ipbar = 0

        for instrumentdbFile in instrumentdbFiles:

            pdb = lofar.parmdb.parmdb(instrumentdbFile)

            # fill the values
            data = pdb.getValuesGrid(solType+':*')
            if 'Real' in solType: dataIm = pdb.getValuesGrid(solType.replace('Real','Imag')+':*')
            if 'Imag' in solType: dataRe = pdb.getValuesGrid(solType.replace('Imag','Real')+':*')
            for solEntry in data:

                pol, dir, ant, parm = parmdbToAxes(solEntry)
                ptype |= set([solEntry.split(':')[0]]) # original parmdb solution type

                freq = data[solEntry]['freqs']
                time = data[solEntry]['times']

                val = data[solEntry]['values']

                # convert Real and Imag in Amp and Phase respectively
                if parm == 'Real':
                    solEntryIm = solEntry.replace('Real','Imag')
                    valI = dataIm[solEntryIm]['values']
                    val = np.sqrt((val**2)+(valI**2))
                if parm == 'Imag':
                    solEntryRe = solEntry.replace('Imag','Real')
                    valR = dataRe[solEntryRe]['values']
                    val = np.arctan2(val, valR)

                coords = []
                if pol is not None:
                    polCoord = np.searchsorted(pols, pol)
                    coords.append(polCoord)
                if dir is not None:
                    dirCoord = np.searchsorted(dirs, dir)
                    coords.append(dirCoord)
                if ant is not None:
                    antCoord = np.searchsorted(ants, ant)
                    coords.append(antCoord)
                freqCoord = np.searchsorted(freqs, freq)
                timeCoord = np.searchsorted(times, time)
                vals[tuple(coords)][np.ix_(freqCoord,timeCoord)] = val.T
                weights[tuple(coords)][np.ix_(freqCoord,timeCoord)] = 1
                pbar.update(ipbar)
            ipbar += 1

        np.putmask(vals, ~np.isfinite(vals), 0) # put inf and nans to 0
        #vals = np.nan_to_num(vals) # replace nans with 0 (flagged later)

        pbar.finish()
        if solType == '*RotationAngle':
            np.putmask(weights, vals == 0., 0) # flag where val=0
            h5parm.makeSoltab(solset, 'rotation', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*RotationMeasure':
            np.putmask(weights, vals == 0., 0) # flag where val=0
            h5parm.makeSoltab(solset, 'rotationmeasure', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*ScalarPhase':
            np.putmask(weights, vals == 0., 0)
            h5parm.makeSoltab(solset, 'scalarphase', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*ScalarAmplitude':
            np.putmask(weights, vals == 0., 0)
            h5parm.makeSoltab(solset, 'scalaramplitude', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'Clock':
            np.putmask(weights, vals == 0., 0)
            # clock may be diag or scalar
            if len(pols) == 0:
                h5parm.makeSoltab(solset, 'clock', axesNames=['ant','freq','time'], \
                    axesVals=[ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
            else:
                h5parm.makeSoltab(solset, 'clock', axesNames=['pol','ant','freq','time'], \
                    axesVals=[pols,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == 'TEC':
            np.putmask(weights, vals == 0., 0)
            # tec may be diag or scalar
            if len(pols) == 0:
                h5parm.makeSoltab(solset, 'tec', axesNames=['dir','ant','freq','time'], \
                    axesVals=[dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
            else:
                h5parm.makeSoltab(solset, 'tec', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Real' or solType == '*Gain:*:Ampl':
            np.putmask(vals, vals == 0, 1) # nans were put to 0 before, set them to 1
            np.putmask(weights, vals == 1., 0) # flag where val=1
            h5parm.makeSoltab(solset, 'amplitude', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))
        elif solType == '*Gain:*:Imag' or solType == '*Gain:*:Phase':
            np.putmask(weights, vals == 0., 0) # flag where val=0
            h5parm.makeSoltab(solset, 'phase', axesNames=['pol','dir','ant','freq','time'], \
                    axesVals=[pols,dirs,ants,freqs,times], vals=vals, weights=weights, parmdbType=', '.join(list(ptype)))

        logging.info('Flagged data: %.3f%%' % (100.*(len(weights.flat)-np.count_nonzero(weights))/len(weights.flat)))

    logging.info('Collecting information from the ANTENNA table.')
    antennaTable = pt.table(antennaFile, ack=False)
    antennaNames = antennaTable.getcol('NAME')
    antennaPositions = antennaTable.getcol('POSITION')
    antennaTable.close()
    antennaTable = solset._f_get_child('antenna')
    antennaTable.append(zip(*(antennaNames,antennaPositions)))

    logging.info('Collecting information from the FIELD table.')
    fieldTable = pt.table(fieldFile, ack=False)
    phaseDir = fieldTable.getcol('PHASE_DIR')
    pointing = phaseDir[0, 0, :]
    fieldTable.close()

    sourceTable = solset._f_get_child('source')
    # add the field centre, that is also the direction for Gain and CommonRotationAngle
    sourceTable.append([('pointing',pointing)])

    dirs = []
    for tab in solset._v_children:
        c = solset._f_get_child(tab)
        if c._v_name != 'antenna' and c._v_name != 'source':
            if c.__contains__('dir'):
                dirs.extend(list(set(c.dir)))
    # remove duplicates
    dirs = list(set(dirs))
    # remove any pointing (already in the table)
    if 'pointing' in dirs:
        dirs.remove('pointing')

    if not os.path.isdir(skydbFile) and dirs!=[]:
        logging.critical('Missing skydb table.')
        sys.exit(1)

    if dirs != []:
        logging.info('Collecting information from the sky table.')
        sourceFile = skydbFile + '/SOURCES'
        src_table = pt.table(sourceFile, ack=False)
        sub_tables = src_table.getsubtables()
        vals = []
        ra = dec = np.nan
        has_patches_subtable = False
        for sub_table in sub_tables:
            if 'PATCHES' in sub_table:
                has_patches_subtable = True
        if has_patches_subtable:
            # Read values from PATCHES subtable
            src_table.close()
            sourceFile = skydbFile + '/SOURCES/PATCHES'
            src_table = pt.table(sourceFile, ack=False)
            patch_names = src_table.getcol('PATCHNAME')
            patch_ras = src_table.getcol('RA')
            patch_decs = src_table.getcol('DEC')
            for source in dirs:
                try:
                    patch_indx = patch_names.index(source)
                    ra = patch_ras[patch_indx]
                    dec = patch_decs[patch_indx]
                except ValueError:
                    ra = np.nan
                    dec = np.nan
                    logging.error('Cannot find the source '+source+'. I leave NaNs.')
                vals.append([ra, dec])
            src_table.close()
        else:
            # Try to read default values from parmdb instead
            skydb = lofar.parmdb.parmdb(skydbFile)
            vals = []
            ra = dec = np.nan

            for source in dirs:
                try:
                    ra = skydb.getDefValues('Ra:' + source)['Ra:' + source][0][0]
                    dec = skydb.getDefValues('Dec:' + source)['Dec:' + source][0][0]
                except KeyError:
                    # Source not found in skymodel parmdb, try to find components
                    logging.warning('Cannot find the source '+source+'. Trying components.')
                    ra = np.array(skydb.getDefValues('Ra:*' + source + '*').values())
                    dec = np.array(skydb.getDefValues('Dec:*' + source + '*').values())
                    if len(ra) == 0 or len(dec) == 0:
                        ra = np.nan
                        dec = np.nan
                        logging.error('Cannot find the source '+source+'. I leave NaNs.')
                    else:
                        ra = ra.mean()
                        dec = dec.mean()
                        logging.info('Found average direction for '+source+' at ra:'+str(ra)+' - dec:'+str(dec))
                vals.append([ra, dec])
        sourceTable.append(zip(*(dirs,vals)))

    logging.info("Total file size: "+str(int(h5parm.H.get_filesize()/1024./1024.))+" M.")

    # Add CREATE entry to history and print summary of tables if verbose
    soltabs = h5parm.getSoltabs(solset=solset)
    for st in soltabs:
        sw = solWriter(soltabs[st])
        if globaldbFile is None:
            sw.addHistory('CREATE (by H5parm_importer.py from %s:%s/%s)' % (socket.gethostname(), os.path.abspath(''), "manual list"))
        else:
            sw.addHistory('CREATE (by H5parm_importer.py from %s:%s/%s)' % (socket.gethostname(), os.path.abspath(''), globaldbFile))
    if verbose:
        logging.info(str(h5parm))

    del h5parm
    logging.info('Done.')    
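
A minimal call sketch for the create_h5parm() function above. Every path and name below is a placeholder, and the keyword values simply mirror the docstring (compression level 5, a "sol###"-style solution-set name); this is an illustration, not part of the original script.

# Hypothetical usage of create_h5parm(); all paths are placeholders.
instrumentdbFiles = ['L123456_SB000.MS/instrument', 'L123456_SB001.MS/instrument']
create_h5parm(instrumentdbFiles,
              antennaFile='L123456_SB000.MS/ANTENNA',
              fieldFile='L123456_SB000.MS/FIELD',
              skydbFile='L123456_SB000.MS/sky',
              h5parmFile='L123456.h5',
              complevel=5,
              solsetName='sol000',
              globaldbFile=None,
              verbose=True)
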
Example #27
def run( step, parset, H ):
    """
    Fits a screen to TEC values derived by the TECFIT operation.

    The TEC values are read from the specified tec soltab.

    The results of the fit are stored in the specified tecscreen solution table.
    These values are the screen TEC values per station per pierce point per
    solution interval. The pierce point locations are stored in an auxiliary
    array in the output solution table.

    TEC screens can be plotted with the PLOT operation by setting PlotType =
    TECScreen.

    The H5parm_exporter.py tool can be used to export the screen to a parmdb
    that BBS and the AWimager can use. Note, however, that the output screens
    are not normalized properly (any normalization was lost due to the use of
    source-to-source phase gradients in the TECFIT operation). Therefore, a
    direction-independent calibration must be done after exporting the screens
    to a parmdb file, with the following settings in the BBS solve step:

        Model.Ionosphere.Enable = T
        Model.Ionosphere.Type = EXPION
    """
    import numpy as np
    import re
    from losoto.h5parm import solFetcher, solWriter
    # Switch to the Agg backend to prevent problems with pylab imports when
    # DISPLAY env. variable is not set
    import os
    if 'DISPLAY' not in os.environ:
        import matplotlib
        matplotlib.use("Agg")

    soltabs = getParSoltabs( step, parset, H )
    outSoltabs = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "OutSoltab"]), [] )
    height = np.array(parset.getDoubleVector('.'.join(["LoSoTo.Steps", step, "Height"]), [200e3] ))
    order = int(parset.getString('.'.join(["LoSoTo.Steps", step, "Order"]), '15' ))

    # Load TEC values from TECFIT operation
    indx = 0
    for soltab in openSoltabs(H, soltabs):
        if 'tec' not in soltab._v_title:
            logging.warning('No TECFIT solution tables found for solution table '
                '{0}'.format(soltabs[indx]))
            indx += 1
            continue
        solset = soltabs[indx].split('/')[0]
        logging.info('Using input solution table: {0}'.format(soltabs[indx]))
        logging.info('Using output solution table: {0}'.format(outSoltabs[indx]))

        # Collect station and source names and positions and times, making sure
        # that they are ordered correctly.
        t = solFetcher(soltab)
        r, axis_vals = t.getValues()
        source_names = axis_vals['dir']
        source_dict = H.getSou(solset)
        source_positions = []
        for source in source_names:
            source_positions.append(source_dict[source])
        station_names = axis_vals['ant']
        station_dict = H.getAnt(solset)
        station_positions = []
        for station in station_names:
            station_positions.append(station_dict[station])
        times = axis_vals['time']

        # Get sizes
        N_sources = len(source_names)
        N_times = len(times)
        N_stations = len(station_names)
        N_piercepoints = N_sources * N_stations
        rr = np.reshape(r.transpose([0, 2, 1]), [N_piercepoints, N_times])

        heights = list(set(np.linspace(height[0], height[-1], 5)))
        heights.sort()
        if len(heights) > 1:
            logging.info('Trying range of heights: {0} m'.format(heights))
        for i, height in enumerate(heights):
            # Find pierce points and airmass values for given screen height
            logging.info('Using height = {0} m and order = {1}'.format(height, order))
            if height < 100e3:
                logging.warning("Height is less than 100e3 m.")
            pp, airmass = calculate_piercepoints(np.array(station_positions),
                np.array(source_positions), np.array(times), height)

            # Fit a TEC screen
            r_0 = 10e3
            beta = 5.0 / 3.0
            tec_screen, residual = fit_screen_to_tec(station_names, source_names,
                pp, airmass, rr, times, height, order, r_0, beta)
            total_resid = np.sum(np.abs(residual))
            if i > 0:
                if total_resid < best_resid:
                    tec_screen_best = tec_screen
                    pp_best = pp
                    height_best = height
                    best_resid = total_resid
            else:
                tec_screen_best = tec_screen
                pp_best = pp
                height_best = height
                best_resid = total_resid
            if len(heights) > 1:
                logging.info('Total residual for fit: {0}\n'.format(total_resid))

        # Use screen with lowest total residual
        if len(heights) > 1:
            tec_screen = tec_screen_best
            pp = pp_best
            height = height_best
            logging.info('Using height (with lowest total residual) of {0} m'.format(height))

        # Write the results to the output solset
        dirs_out = source_names
        times_out = times
        ants_out = station_names

        # Make output tecscreen table
        outSolset = outSoltabs[indx].split('/')[0]
        outSoltab = outSoltabs[indx].split('/')[1]
        if outSolset not in H.getSolsets().keys():
            solsetTEC = H.makeSolset(outSolset)
            dirs_pos = source_positions
            sourceTable = solsetTEC._f_get_child('source')
            sourceTable.append(zip(*(dirs_out, dirs_pos)))
            ants_pos = station_positions
            antennaTable = solsetTEC._f_get_child('antenna')
            antennaTable.append(zip(*(ants_out, ants_pos)))

        # Store tecscreen values. The residual values are stored in the weights
        # table. Flagged values of the screen have weights set to 0.0.
        vals = tec_screen.transpose([1, 0, 2])
        weights = residual.transpose([1, 0, 2])
        tec_screen_st = H.makeSoltab(outSolset, 'tecscreen', outSoltab,
            axesNames=['dir', 'time', 'ant'], axesVals=[dirs_out, times_out,
            ants_out], vals=vals, weights=weights)

        # Store beta, r_0, height, and order as attributes of the tecscreen
        # soltab
        tec_screen_st._v_attrs['beta'] = beta
        tec_screen_st._v_attrs['r_0'] = r_0
        tec_screen_st._v_attrs['height'] = height
        tec_screen_st._v_attrs['order'] = order

        # Make output piercepoint table
        tec_screen_solset = tec_screen_st._v_parent._v_name
        H.H.create_carray('/'+tec_screen_solset+'/'+tec_screen_st._v_name,
            'piercepoint', obj=pp)

        # Add histories
        sw = solWriter(tec_screen_st)
        sw.addHistory('CREATE (by TECSCREEN operation)')
        indx += 1

    return 0
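
A hedged parset sketch for driving the TECSCREEN step above. Only OutSoltab, Height and Order are read directly by this run() function; the step name, the Operation line and the Soltab selection follow the usual LoSoTo.Steps convention and, like the table names, are assumptions added here for illustration.

LoSoTo.Steps.tecscreen.Operation = TECSCREEN
LoSoTo.Steps.tecscreen.Soltab = [sol000/tec000]
LoSoTo.Steps.tecscreen.OutSoltab = [sol000/tecscreen000]
LoSoTo.Steps.tecscreen.Height = [200e3]
LoSoTo.Steps.tecscreen.Order = 15
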
Example #28
File: clip.py  Project: twshimwell/losoto
def run(step, parset, H):

    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)

    axesToClip = parset.getStringVector(
        '.'.join(["LoSoTo.Steps", step, "Axes"]), [])
    clipLevel = parset.getFloat('.'.join(["LoSoTo.Steps", step, "ClipLevel"]),
                                0.)
    log = parset.getBool('.'.join(["LoSoTo.Steps", step, "Log"]), True)

    if len(axesToClip) < 1:
        logging.error("Please specify axes to clip.")
        return 1
    if clipLevel == 0.:
        logging.error(
            "Please specify factor above/below median at which to clip.")
        return 1

    for soltab in openSoltabs(H, soltabs):

        logging.info("Clipping soltab: " + soltab._v_name)

        sf = solFetcher(soltab)

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        sf.setSelection(**userSel)

        # some checks
        for i, axis in enumerate(axesToClip[:]):
            if axis not in sf.getAxesNames():
                del axesToClip[i]
                logging.warning('Axis \"' + axis + '\" not found. Ignoring.')

        if sf.getType() != 'amplitude':
            logging.error('CLIP is for "amplitude" tables, not %s.' %
                          sf.getType())
            continue

        sw = solWriter(soltab, useCache=True)  # remember to flush()

        before_count = 0
        after_count = 0
        total = 0
        for vals, weights, coord, selection in sf.getValuesIter(
                returnAxes=axesToClip, weight=True):

            total += len(vals)
            before_count += (len(weights) - np.count_nonzero(weights))

            # first find the median and standard deviation
            if (weights == 0).all():
                valmedian = 0
            else:
                if log:
                    valmedian = np.median(np.log10(vals[(weights != 0)]))
                    rms = np.std(np.log10(vals[(weights != 0)]))
                    np.putmask(
                        weights,
                        np.abs(np.log10(vals) - valmedian) > rms * clipLevel,
                        0)
                else:
                    valmedian = np.median(vals[(weights != 0)])
                    rms = np.std(vals[(weights != 0)])
                    np.putmask(weights,
                               np.abs(vals - valmedian) > rms * clipLevel, 0)

            after_count += (len(weights) - np.count_nonzero(weights))

            # writing back the solutions
            sw.selection = selection
            sw.setValues(weights, weight=True)

        sw.addHistory('CLIP (over %s with %s sigma cut)' %
                      (axesToClip, clipLevel))
        logging.info('Clip, flagged data: %f %% -> %f %%' \
                % (100.*before_count/total, 100.*after_count/total))

        sw.flush()

    return 0
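
The clipping rule used above can be read in isolation: a point is flagged when its (optionally log10) value deviates from the median of the currently unflagged data by more than ClipLevel times their standard deviation. A minimal numpy sketch of that rule, detached from the h5parm machinery (the function name is made up for illustration):

import numpy as np

def clip_weights(vals, weights, clip_level, log=True):
    # Zero the weights of points further than clip_level * std from the median.
    data = np.log10(vals) if log else vals
    good = weights != 0
    if not good.any():
        return weights
    median = np.median(data[good])
    rms = np.std(data[good])
    np.putmask(weights, np.abs(data - median) > rms * clip_level, 0)
    return weights
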
Example #29
        sys.exit(1)
    skydbFile = os.path.join(inMSs[0], 'sky')
    if not os.path.isdir(skydbFile):
        logging.warning(
            'No sky table found. (Direction-dependent parameters will not work.)'
        )
        skydbFile = None

    #generate list of parmDB-filenames
    parmDBnames = [MS.rstrip('/') + instrument for MS in inMSs]

    #create and fill the hdf5-file:
    solset = parmDBs2h5parm(hdf5File,
                            parmDBnames,
                            antennaFile,
                            fieldFile,
                            skydbFile,
                            compression=compression,
                            solsetName=solsetName)

    # Add CREATE entry to history
    h5parmDB = h5parm(hdf5File, readonly=False)
    soltabs = h5parmDB.getSoltabs(solset=solset)
    for st in soltabs:
        sw = solWriter(soltabs[st])
        sw.addHistory(
            'CREATE (by PipelineStep_losotoImporter from %s / %s - %s)' %
            (os.path.abspath(''), os.path.basename(
                parmDBnames[0]), os.path.basename(parmDBnames[-1])))
    h5parmDB.close()
Example #30
vals = np.arange(4*10*100).reshape(4,10,100)
logging.info("Create soltab")
H5.makeSoltab(ss, 'amplitude', 'stTest', axesNames=['axis1','axis2','axis3'], axesVals=axesVals, vals=vals, weights=vals)
logging.info("Create soltab (using same name)")
H5.makeSoltab(ss, 'amplitude', 'stTest', axesNames=['axis1','axis2','axis3'], axesVals=axesVals, vals=vals, weights=vals)
logging.info("Create soltab (using default name)")
H5.makeSoltab(ss, 'amplitude', axesNames=['axis1','axis2','axis3'], axesVals=axesVals, vals=vals, weights=vals)
logging.info('Get a soltab object')
st=H5.getSoltab(ss,'stTest')
logging.info('Get all soltabs:')
print H5.getSoltabs(ss)

print "###########################################"
logging.info('### solFetcher/solWriter - General')
Hsf = solFetcher(st)
Hsw = solWriter(st)
logging.info('Get solution Type (exp: amplitude)')
print Hsf.getType()
logging.info('Get Axes Names')
print Hsf.getAxesNames()
logging.info('Get Axis1 Len (exp: 4)')
print Hsf.getAxisLen('axis1')
logging.info('Get Axis1 Type (exp: str)')
print Hsf.getAxisType('axis1')
logging.info('Get Axis2 Type (exp: float)')
print Hsf.getAxisType('axis2')
logging.info('Get Axis1 Values (exp: a,b,c,d)')
print Hsf.getAxisValues('axis1')
logging.info('Set new axes values')
Hsw.setAxisValues('axis1',['e','f','g','h'])
logging.info('Get new Axis1 Values (exp: e,f,g,h)')
Example #31
def run( step, parset, H ):

    import scipy.ndimage.filters
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )

    axesToSmooth = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "Axes"]), [] )
    FWHM = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "FWHM"]), [] )
    mode = parset.getString('.'.join(["LoSoTo.Steps", step, "Mode"]), "runningmedian" )

    if mode == "runningmedian" and len(axesToSmooth) != len(FWHM):
        logging.error("Axes and FWHM lenghts must be equal.")
        return 1

    if mode == "runningmedian":
        logging.warning('Flagged data are still taken into account!')

    if FWHM != [] and mode != "runningmedian":
        logging.warning("FWHM makes sense only with runningmedian mode, ignoring it.")

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Smoothing soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab, useCache = True) # remember to flush!

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        for i, axis in enumerate(axesToSmooth[:]):
            if axis not in sf.getAxesNames():
                del axesToSmooth[i]
                del FWHM[i]
                logging.warning('Axis \"'+axis+'\" not found. Ignoring.')

        for vals, weights, coord, selection in sf.getValuesIter(returnAxes=axesToSmooth, weight=True):

            if mode == 'runningmedian':
                valsnew = scipy.ndimage.filters.median_filter(vals, FWHM)
            elif mode == 'median':
                valsnew = np.median( vals[(weights!=0)] )
            elif mode == 'mean':
                valsnew = np.mean( vals[(weights!=0)] )
            else:
                logging.error('Mode must be: runningmedian, median or mean')
                return 1

            sw.selection = selection
            sw.setValues(valsnew)

        sw.flush()
        sw.addHistory('SMOOTH (over %s with mode = %s)' % (axesToSmooth, mode))
        del sf
        del sw
    return 0
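
In the runningmedian mode above the smoothing itself is a single call to scipy.ndimage's median filter, with FWHM giving the window length along each smoothed axis. A standalone sketch with made-up data (note that, as the warning in the code says, flagged points are not excluded by this filter):

import numpy as np
import scipy.ndimage.filters

vals = np.random.randn(100, 50)      # hypothetical (time, freq) block of solutions
FWHM = [9, 3]                        # window length along time and freq
valsnew = scipy.ndimage.filters.median_filter(vals, FWHM)
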
Example #32
File: interp.py  Project: AHorneffer/losoto
def run( step, parset, H ):
    """
    Interpolate the solutions from one table into a destination table
    """
    import itertools
    import scipy.interpolate
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )
    solTypes = getParSolTypes( step, parset, H )

    calSoltab = parset.getString('.'.join(["LoSoTo.Steps", step, "CalSoltab"]), '' )
    calDir = parset.getString('.'.join(["LoSoTo.Steps", step, "CalDir"]), '' )
    interpAxes = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "InterpAxes"]), ['time','freq'] )
    interpMethod = parset.getString('.'.join(["LoSoTo.Steps", step, "InterpMethod"]), 'linear' )
    medAxis = parset.getString('.'.join(["LoSoTo.Steps", step, "MedAxis"]), '' )
    rescale = parset.getBool('.'.join(["LoSoTo.Steps", step, "Rescale"]), False )

    if interpMethod not in ["nearest", "linear", "cubic"]:
        logging.error('Interpolation method must be nearest, linear or cubic.')
        return 1

    if rescale and medAxis == '':
        logging.error('A medAxis is needed for rescaling.')
        return 1

    # open calibration table
    css, cst = calSoltab.split('/')
    cr = solFetcher(H.getSoltab(css, cst))
    cAxesNames = cr.getAxesNames()

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Interpolating soltab: "+soltab._v_name)

        tr = solFetcher(soltab)
        tw = solWriter(soltab)

        axesNames = tr.getAxesNames()
        for i, interpAxis in enumerate(interpAxes[:]):
            if interpAxis not in axesNames or interpAxis not in cAxesNames:
                logging.error('Axis '+interpAxis+' not found. Ignoring.')
                del interpAxes[i]
        if rescale and (medAxis not in axesNames or medAxis not in cAxesNames):
            logging.error('Axis '+medAxis+' not found. Cannot proceed.')
            return 1

        # axis selection
        userSel = {}
        for axis in tr.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        tr.setSelection(**userSel)

        for vals, coord, selection in tr.getValuesIter(returnAxes=interpAxes):

            # construct grid
            coordSel = removeKeys(coord, interpAxes)
            logging.debug("Working on coords:"+str(coordSel))
            # change dir if specified
            if calDir != '':
                coordSel['dir'] = calDir
            cr.setSelection(**coordSel)
            calValues, calCoord = cr.getValues()

            # fill medAxis with the median value
            if rescale:
                axis = cAxesNames.index(medAxis)
                calValues = np.repeat( np.expand_dims( np.median( calValues, axis ), axis ), calValues.shape[axis], axis )

            # create a list of values whose coords are calPoints
            calValues = np.ndarray.flatten(calValues)

            # create calibrator/target coordinates arrays
            calPoints = []
            targetPoints = []
            for interpAxis in interpAxes:
                calPoints.append(calCoord[interpAxis])
                targetPoints.append(coord[interpAxis])
            calPoints = np.array([x for x in itertools.product(*calPoints)])
            targetPoints = np.array([x for x in itertools.product(*targetPoints)])

            # interpolation
            valsNew = scipy.interpolate.griddata(calPoints, calValues, targetPoints, interpMethod)

            # fill values outside boundaries with "nearest" solutions
            # NOTE: in 1D this is useless due to a scipy bug, but it is compensated for in the next block
            if interpMethod != 'nearest':
                valsNewNearest = scipy.interpolate.griddata(calPoints, calValues, targetPoints, 'nearest')
                # NaN is != from itself
                valsNew[ np.isnan(valsNew) ] = valsNewNearest [ np.isnan(valsNew) ]

            # fix a bug in Scipy which puts NaNs outside the convex hull in 1D for 'nearest'
            if len(np.squeeze(vals).shape) == 1:
                import scipy.cluster.vq as vq
                valsNew = np.squeeze(valsNew)
                code, dist = vq.vq(targetPoints, calPoints)
                # NaN is != from itself
                valsNew[ np.isnan(valsNew) ] = calValues[code][ np.isnan(valsNew) ]
                valsNew = valsNew.reshape(vals.shape)

            if rescale:
                # rescale solutions
                axis = interpAxes.index(medAxis)
                valsMed = np.repeat( np.expand_dims( np.median( vals, axis ), axis ), vals.shape[axis], axis )
                valsNewMed = np.repeat( np.expand_dims( np.median( valsNew, axis ), axis ), valsNew.shape[axis], axis )
                valsNew = vals*valsNewMed/valsMed
                #print "Rescaling by: ", valsNewMed[:,0]/valsMed[:,0]

            # writing back the solutions
            tw.selection = selection
            tw.setValues(valsNew)

    tw.addHistory('INTERP (from table %s)' % (calSoltab))
    return 0
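
The heart of the INTERP step above is scipy.interpolate.griddata evaluated on the flattened (time, freq) grids of the calibrator and target tables, followed by a second 'nearest' pass that fills the NaNs left outside the calibrator's convex hull. A self-contained sketch with invented grids:

import itertools
import numpy as np
import scipy.interpolate

# hypothetical coarse calibrator grid and finer target grid
cal_time = np.arange(0., 100., 10.)
cal_freq = np.arange(100e6, 200e6, 20e6)
tgt_time = np.arange(0., 100., 1.)
tgt_freq = np.arange(100e6, 200e6, 5e6)

calPoints = np.array([x for x in itertools.product(cal_time, cal_freq)])
targetPoints = np.array([x for x in itertools.product(tgt_time, tgt_freq)])
calValues = np.random.randn(len(calPoints))   # flattened calibrator solutions

valsNew = scipy.interpolate.griddata(calPoints, calValues, targetPoints, 'linear')
# points outside the calibrator's convex hull come back as NaN: fill them with 'nearest'
valsNewNearest = scipy.interpolate.griddata(calPoints, calValues, targetPoints, 'nearest')
valsNew[np.isnan(valsNew)] = valsNewNearest[np.isnan(valsNew)]
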
Example #33
def run( step, parset, H ):
   """
   Generic unspecified step for easy expansion.
   """
   import numpy as np
   from losoto.h5parm import solFetcher, solWriter
   # all the following are LoSoTo function to extract information from the parset

   # get involved solsets using local step values or global values or all
   solsets = getParSolsets( step, parset, H )
   logging.info('Solset: '+str(solsets))
   # get involved soltabs using local step values or global values or all
   soltabs = getParSoltabs( step, parset, H )
   logging.info('Soltab: '+str(soltabs))
   # get list of SolTypes using local step values or global values or all
   solTypes = getParSolTypes( step, parset, H )
   logging.info('SolType: '+str(solTypes))
   # get the axis selections (ant/pol/dir) from the parset; they are reused
   # explicitly in the second setSelection() call below
   ants = getParAxis( step, parset, H, 'ant' )
   pols = getParAxis( step, parset, H, 'pol' )
   dirs = getParAxis( step, parset, H, 'dir' )


   # do something on every soltab (use the openSoltab LoSoTo function)
   for soltab in openSoltabs( H, soltabs ):
        logging.info("--> Working on soltab: "+soltab._v_name)
        # use the solFetcher from the H5parm lib
        t = solFetcher(soltab)
        tw = solWriter(soltab)

        axisNames = t.getAxesNames()
        logging.info("Axis names are: "+str(axisNames))

        solType = t.getType()
        logging.info("Soltab type is: "+solType)

        # this will make a selection for the getValues() and getValuesIter()
        # interpret every entry in the parset which has an axis name as a selector
        userSel = {}
        for axis in t.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        t.setSelection(**userSel)

        t.setSelection(ant=ants, pol=pols, dir=dirs)
        logging.info("Selection is: "+str(t.selection))

        # find axis values
        logging.info("Antennas (no selection) are: "+str(t.getAxisValues('ant', ignoreSelection=True)))
        logging.info("Antennas (with selection) are: "+str(t.getAxisValues('ant')))
        # but one can also use (selection is active here!)
        logging.info("Antennas (other method) are: "+str(t.ant))
        logging.info("Frequencies are: "+str(t.freq))
        logging.info("Directions are: "+str(t.dir))
        logging.info("Polarizations are: "+str(t.pol))
        # try to access a non-existent axis
        t.getAxisValues('nonexistantaxis')

        # now get all values given this selection
        logging.info("Get data using t.val")
        val = t.val
        logging.debug('shape of val: '+str(t.val.shape))
        logging.info("$ val is "+str(val[0,0,0,0,100]))
        weight = t.weight
        time = t.time
        thisTime = t.time[100]

        # another way to get the data is using the getValues()
        logging.info("Get data using getValues()")
        grid, axes = t.getValues()
        # axis names
        logging.info("Axes: "+str(t.getAxesNames()))
        # axis shape
        print axes
        print [t.getAxisLen(axis) for axis in axes] # not ordered, is a dict!
Example #34
def run(step, parset, H):
    """
    Interpolate the solutions from one table into a destination table
    """
    import itertools
    import scipy.interpolate
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)
    solTypes = getParSolTypes(step, parset, H)

    calSoltab = parset.getString('.'.join(["LoSoTo.Steps", step, "CalSoltab"]),
                                 '')
    calDir = parset.getString('.'.join(["LoSoTo.Steps", step, "CalDir"]), '')
    interpAxes = parset.getStringVector(
        '.'.join(["LoSoTo.Steps", step, "InterpAxes"]), ['time', 'freq'])
    interpMethod = parset.getString(
        '.'.join(["LoSoTo.Steps", step, "InterpMethod"]), 'linear')
    medAxis = parset.getString('.'.join(["LoSoTo.Steps", step, "MedAxis"]), '')
    rescale = parset.getBool('.'.join(["LoSoTo.Steps", step, "Rescale"]),
                             False)

    if interpMethod not in ["nearest", "linear", "cubic"]:
        logging.error('Interpolation method must be nearest, linear or cubic.')
        return 1

    if rescale and medAxis == '':
        logging.error('A medAxis is needed for rescaling.')
        return 1

    # open calibration table
    css, cst = calSoltab.split('/')
    cr = solFetcher(H.getSoltab(css, cst))
    cAxesNames = cr.getAxesNames()

    for soltab in openSoltabs(H, soltabs):

        logging.info("Interpolating soltab: " + soltab._v_name)

        tr = solFetcher(soltab)
        tw = solWriter(soltab)

        axesNames = tr.getAxesNames()
        for i, interpAxis in enumerate(interpAxes[:]):
            if interpAxis not in axesNames or interpAxis not in cAxesNames:
                logging.error('Axis ' + interpAxis + ' not found. Ignoring.')
                del interpAxes[i]
        if rescale and (medAxis not in axesNames or medAxis not in cAxesNames):
            logging.error('Axis ' + medAxis + ' not found. Cannot proceed.')
            return 1

        # axis selection
        userSel = {}
        for axis in tr.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        tr.setSelection(**userSel)

        for vals, coord, selection in tr.getValuesIter(returnAxes=interpAxes):

            # construct grid
            coordSel = removeKeys(coord, interpAxes)
            logging.debug("Working on coords:" + str(coordSel))
            # change dir if specified
            if calDir != '':
                coordSel['dir'] = calDir
            cr.setSelection(**coordSel)
            calValues, calCoord = cr.getValues()

            # fill medAxis with the median value
            if rescale:
                axis = cAxesNames.index(medAxis)
                calValues = np.repeat(
                    np.expand_dims(np.median(calValues, axis), axis),
                    calValues.shape[axis], axis)

            # create a list of values whose coords are calPoints
            calValues = np.ndarray.flatten(calValues)

            # create calibrator/target coordinates arrays
            calPoints = []
            targetPoints = []
            for interpAxis in interpAxes:
                calPoints.append(calCoord[interpAxis])
                targetPoints.append(coord[interpAxis])
            calPoints = np.array([x for x in itertools.product(*calPoints)])
            targetPoints = np.array(
                [x for x in itertools.product(*targetPoints)])

            # interpolation
            valsNew = scipy.interpolate.griddata(calPoints, calValues,
                                                 targetPoints, interpMethod)

            # fill values outside boundaries with "nearest" solutions
            # NOTE: in 1D this is useless due to a scipy bug, but it is compensated for in the next block
            if interpMethod != 'nearest':
                valsNewNearest = scipy.interpolate.griddata(
                    calPoints, calValues, targetPoints, 'nearest')
                # NaN is != from itself
                valsNew[np.isnan(valsNew)] = valsNewNearest[np.isnan(valsNew)]

            # fix a bug in Scipy which puts NaNs outside the convex hull in 1D for 'nearest'
            if len(np.squeeze(vals).shape) == 1:
                import scipy.cluster.vq as vq
                valsNew = np.squeeze(valsNew)
                code, dist = vq.vq(targetPoints, calPoints)
                # NaN is != from itself
                valsNew[np.isnan(valsNew)] = calValues[code][np.isnan(valsNew)]
                valsNew = valsNew.reshape(vals.shape)

            if rescale:
                # rescale solutions
                axis = interpAxes.index(medAxis)
                valsMed = np.repeat(
                    np.expand_dims(np.median(vals, axis), axis),
                    vals.shape[axis], axis)
                valsNewMed = np.repeat(
                    np.expand_dims(np.median(valsNew, axis), axis),
                    valsNew.shape[axis], axis)
                valsNew = vals * valsNewMed / valsMed
                #print "Rescaling by: ", valsNewMed[:,0]/valsMed[:,0]

            # writing back the solutions
            tw.selection = selection
            tw.setValues(valsNew)

    tw.addHistory('INTERP (from table %s)' % (calSoltab))
    return 0
Example #35
File: tecfit.py  Project: twshimwell/losoto
def run(step, parset, H):
    """
    Fit phase solutions to obtain TEC values per direction.

    Phase solutions are assumed to be stored in solsets of the H5parm file, one
    solset per band per field. Only phase- or scalarphase-type solution tables
    are used. If direction-independent solutions are found (in addition to the
    direction-dependent ones), they are added, after averaging, to the
    corresponding direction-dependent ones. Phase solutions are automatically
    grouped by field and by band.

    The derived TEC values are stored in the specified output soltab of type
    'tec', with one TEC value per station per direction per solution interval.
    The TEC values are derived using the ``lofar.expion.baselinefitting.fit()``
    function to fit a TEC value to the phases. The term that is minimized
    includes all baselines, so there is no preferred reference station, and the
    residual is computed as the complex number 1.0 - exp(1i phasedifference),
    which is zero when the phase difference is a multiple of 2pi.

    The TEC solution table may be used to derive TEC screens using the
    TECSCREEN operation.
    """
    from losoto.h5parm import solFetcher, solWriter
    import numpy as np
    # Switch to the Agg backend to prevent problems with pylab imports when
    # DISPLAY env. variable is not set
    import os
    if 'DISPLAY' not in os.environ:
        import matplotlib
        matplotlib.use("Agg")
    from pylab import find
    import re
    from .tecscreen import calculate_piercepoints
    from numpy.linalg import norm

    solsets = getParSolsets(step, parset, H)
    ants = getParAxis(step, parset, H, 'ant')
    pols = getParAxis(step, parset, H, 'pol')
    dirs = getParAxis(step, parset, H, 'dir')
    nband_min = int(
        parset.getString('.'.join(["LoSoTo.Steps", step, "MinBands"]), '8'))
    niter = int(
        parset.getString('.'.join(["LoSoTo.Steps", step, "NumIter"]), '1'))
    dist_cut_m = np.float(
        parset.getString('.'.join(["LoSoTo.Steps", step, "DistCut"]), '2e3'))
    nstations_max = int(
        parset.getString('.'.join(["LoSoTo.Steps", step, "MaxStations"]),
                         '100'))
    outSolset = parset.getString('.'.join(["LoSoTo.Steps", step, "OutSoltab"]),
                                 '').split('/')[0]
    outSoltab = parset.getString('.'.join(["LoSoTo.Steps", step, "OutSoltab"]),
                                 '').split('/')[1]

    # Collect solutions, etc. into arrays for fitting.
    (phases0, phases1, flags, mask, station_names, station_positions,
     source_names, source_positions, freqs, times, pointing,
     soln_type) = collect_solutions(H, dirs=dirs, solsets=solsets)
    if phases0 is None:
        return 1

    # Build list of stations to include
    included_stations = []
    if ants is not None:
        for ant in ants:
            included_stations += [
                s for s in station_names if re.search(ant, s)
            ]
    else:
        included_stations = station_names
    excluded_stations = [
        s for s in station_names if s not in included_stations
    ]

    # Select stations to use for first pass
    if len(excluded_stations) > 0:
        logging.info("Excluding stations: {0}".format(
            np.sort(excluded_stations)))
    mean_position = np.array([
        np.median(station_positions[:, 0]),
        np.median(station_positions[:, 1]),
        np.median(station_positions[:, 2])
    ])
    dist = np.sqrt(np.sum((station_positions - mean_position)**2, axis=1))
    dist_sort_ind = np.argsort(dist)
    station_selection1 = find(dist < dist_cut_m)
    station_selection = np.array([
        i for i in dist_sort_ind if i in station_selection1
        and station_names[i] not in excluded_stations
    ])
    if len(station_selection) > nstations_max:
        station_selection = station_selection[:nstations_max]
    logging.info("Using normal fitting (no iterative search) for {0} stations "
                 "within {1} km of the core:\n{2}".format(
                     len(station_selection), dist_cut_m / 1000.0,
                     station_names[station_selection]))

    station_selection_orig = station_selection
    for iter in xrange(niter):
        # Loop over groups of nearby pierce points to identify bad stations and
        # remove them from the station_selection.
        nsig = 2.5  # number of sigma for cut
        radius = 2.0  # projected radius in km within which to compare
        if iter > 0:
            logging.info("Identifying bad stations from outlier TEC fits...")
            logging.info(
                "Finding nearby piercepoints (assuming a typical screen "
                "height of 200 km)...")

            # For each source, find all the pierce points within a given (projected)
            # distance from the median pierce point x,y location. Assume a typical
            # screen height of 200 km.
            pp, airmass = calculate_piercepoints(
                station_positions[station_selection],
                source_positions[source_selection],
                times,
                height=200e3)
            pp = pp[0, :, :]  # use first time [times, stations, dimension]
            x, y, z = station_positions[station_selection][0, :]
            east = np.array([-y, x, 0])
            east = east / norm(east)
            north = np.array([-x, -y, (x * x + y * y) / z])
            north = north / norm(north)
            up = np.array([x, y, z])
            up = up / norm(up)
            T = np.concatenate([east[:, np.newaxis], north[:, np.newaxis]],
                               axis=1)
            pp1 = np.dot(pp, T).reshape(
                (len(source_names), len(station_selection), 2))
            for i in xrange(len(source_names)):
                x_median = np.median(pp1[i, :, 0]) / 1000.0
                y_median = np.median(pp1[i, :, 1]) / 1000.0
                dist = np.sqrt((pp1[i, :, 0] / 1000.0 - x_median)**2 +
                               (pp1[i, :, 1] / 1000.0 - y_median)**2)
                within_radius = np.where(dist <= radius)[0]
                outside_radius = np.where(dist > radius)
                if len(within_radius) < 10:
                    logging.info(
                        "Insufficient number of closely-spaced pierce "
                        "points for bad-station detection. Skipping...")
                    abort_iter = True
                    break
                else:
                    abort_iter = False
                r_median = np.median(r[i, :, within_radius], axis=1)
                r_tot_meddiff = np.zeros(len(station_selection[within_radius]),
                                         dtype=float)
                for j in xrange(len(station_selection[within_radius])):
                    r_tot_meddiff[j] = np.sum(
                        np.abs(r[i, :, within_radius[j]] - r_median[j]))
            if abort_iter:
                break
            good_stations = np.where(
                r_tot_meddiff < nsig * np.median(r_tot_meddiff))
            station_selection = np.append(
                station_selection[within_radius[good_stations]],
                station_selection[outside_radius])

            new_excluded_stations = [
                station_names[s] for s in station_selection_orig
                if (s not in station_selection and s not in excluded_stations)
            ]
            if len(new_excluded_stations) > 0:
                logging.info(
                    'Excluding stations due to TEC solutions that differ '
                    'significantly from mean: {0}'.format(
                        np.sort(new_excluded_stations)))
                logging.info('Updating fit...')
                excluded_stations += new_excluded_stations
                nstations_max -= len(new_excluded_stations)
            else:
                logging.info('No bad stations found.')
                break

        # Fit a TEC value to the phase solutions per source pair.
        # No iterative search for the global minimum is done
        if soln_type == 'scalarphase':
            r, source_selection = fit_tec_per_source_pair(
                phases0[:, station_selection, :, :],
                flags[:, station_selection, :, :],
                mask,
                freqs,
                propagate=True,
                nband_min=nband_min)
            if r is None:
                return 1
        else:
            r0, source_selection = fit_tec_per_source_pair(
                phases0[:, station_selection, :, :],
                flags[:, station_selection, :, :],
                mask,
                freqs,
                propagate=True,
                nband_min=nband_min)
            r1, source_selection = fit_tec_per_source_pair(
                phases1[:, station_selection, :, :],
                flags[:, station_selection, :, :],
                mask,
                freqs,
                propagate=True,
                nband_min=nband_min)
            if r0 is None or r1 is None:
                return 1

            # take the mean of the two polarizations
            r = (r0 + r1) / 2

    # Add stations by searching iteratively for global minimum in solution space
    station_selection, r = add_stations(station_selection,
                                        phases0,
                                        phases1,
                                        flags,
                                        mask,
                                        station_names,
                                        station_positions,
                                        source_names,
                                        source_selection,
                                        times,
                                        freqs,
                                        r,
                                        nband_min=nband_min,
                                        soln_type=soln_type,
                                        nstations_max=nstations_max,
                                        excluded_stations=excluded_stations,
                                        search_full_tec_range=False)

    # Save TEC values to the output solset
    solset = H.makeSolset(outSolset)
    dirs_out = []
    dirs_pos = []
    for s in source_selection:
        dirs_out.append(source_names[s])
        dirs_pos.append(source_positions[s])
    sourceTable = solset._f_get_child('source')
    sourceTable.append(zip(*(dirs_out, dirs_pos)))

    times_out = times

    ants_out = []
    ants_pos = []
    for s in station_selection:
        ants_out.append(station_names[s])
        ants_pos.append(station_positions[s])
    antennaTable = solset._f_get_child('antenna')
    antennaTable.append(zip(*(ants_out, ants_pos)))

    tf_st = H.makeSoltab(solset._v_name,
                         'tec',
                         outSoltab,
                         axesNames=['dir', 'time', 'ant'],
                         axesVals=[dirs_out, times, ants_out],
                         vals=r[source_selection, :, :],
                         weights=np.ones_like(r[source_selection, :, :]))

    # Add history
    sw = solWriter(tf_st)
    sw.addHistory('CREATE (by TECFIT operation)')

    return 0
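
The fit above relies on the dispersive TEC-phase relation and on a wrap-insensitive residual, as described in the docstring. A minimal stand-alone sketch (not part of LoSoTo; the frequencies and TEC value are made up) of why 1.0 - exp(1i*phasedifference) ignores 2*pi ambiguities:

import numpy as np

def tec_phase(tec, freq_hz):
    # Dispersive ionospheric phase (rad) for a TEC value (TECU) at freq_hz (Hz),
    # using the same constant that appears in the RESIDUALS operation.
    return -8.44797245e9 * tec / freq_hz

def wrap_insensitive_residual(tec, freq_hz, observed_phase):
    # |1 - exp(1i*(model - observed))| vanishes whenever the phase difference
    # is a multiple of 2*pi, so no reference station is singled out.
    dphi = tec_phase(tec, freq_hz) - observed_phase
    return np.abs(1.0 - np.exp(1j * dphi))

freqs = np.linspace(120e6, 160e6, 8)                  # 8 hypothetical bands
phases = tec_phase(0.1, freqs) + 2.0 * np.pi          # data off by one full wrap
print(wrap_insensitive_residual(0.1, freqs, phases))  # ~0 for every band
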
Example #36
0
def run(step, parset, H):
    """
    Fits a screen to TEC values derived by the TECFIT operation.

    The TEC values are read from the specified tec soltab.

    The results of the fit are stored in the specified tecscreen solution table.
    These values are the screen TEC values per station per pierce point per
    solution interval. The pierce point locations are stored in an auxiliary
    array in the output solution table.

    TEC screens can be plotted with the PLOT operation by setting PlotType =
    TECScreen.

    The H5parm_exporter.py tool can be used to export the screen to a parmdb
    that BBS and the AWimager can use. Note, however, that the output screens
    are not normalized properly (any normalization was lost due to the use of
    source-to-source phase gradients in the TECFIT operation). Therefore, a
    direction-independent calibration must be done after exporting the screens
    to a parmdb file, with the following settings in the BBS solve step:

        Model.Ionosphere.Enable = T
        Model.Ionosphere.Type = EXPION
    """
    import numpy as np
    import re
    from losoto.h5parm import solFetcher, solWriter
    # Switch to the Agg backend to prevent problems with pylab imports when
    # DISPLAY env. variable is not set
    import os
    if 'DISPLAY' not in os.environ:
        import matplotlib
        matplotlib.use("Agg")

    soltabs = getParSoltabs(step, parset, H)
    outSoltabs = parset.getStringVector(
        '.'.join(["LoSoTo.Steps", step, "OutSoltab"]), [])
    height = np.array(
        parset.getDoubleVector('.'.join(["LoSoTo.Steps", step, "Height"]),
                               [200e3]))
    order = int(
        parset.getString('.'.join(["LoSoTo.Steps", step, "Order"]), '15'))

    # Load TEC values from TECFIT operation
    indx = 0
    for soltab in openSoltabs(H, soltabs):
        if 'tec' not in soltab._v_title:
            logging.warning(
                'No TECFIT solution tables found for solution table '
                '{0}'.format(soltabs[indx]))
            indx += 1
            continue
        solset = soltabs[indx].split('/')[0]
        logging.info('Using input solution table: {0}'.format(soltabs[indx]))
        logging.info('Using output solution table: {0}'.format(
            outSoltabs[indx]))

        # Collect station and source names and positions and times, making sure
        # that they are ordered correctly.
        t = solFetcher(soltab)
        r, axis_vals = t.getValues()
        source_names = axis_vals['dir']
        source_dict = H.getSou(solset)
        source_positions = []
        for source in source_names:
            source_positions.append(source_dict[source])
        station_names = axis_vals['ant']
        station_dict = H.getAnt(solset)
        station_positions = []
        for station in station_names:
            station_positions.append(station_dict[station])
        times = axis_vals['time']

        # Get sizes
        N_sources = len(source_names)
        N_times = len(times)
        N_stations = len(station_names)
        N_piercepoints = N_sources * N_stations
        rr = np.reshape(r.transpose([0, 2, 1]), [N_piercepoints, N_times])

        heights = list(set(np.linspace(height[0], height[-1], 5)))
        heights.sort()
        if len(heights) > 1:
            logging.info('Trying range of heights: {0} m'.format(heights))
        for i, height in enumerate(heights):
            # Find pierce points and airmass values for given screen height
            logging.info('Using height = {0} m and order = {1}'.format(
                height, order))
            if height < 100e3:
                logging.warning("Height is less than 100e3 m.")
            pp, airmass = calculate_piercepoints(np.array(station_positions),
                                                 np.array(source_positions),
                                                 np.array(times), height)

            # Fit a TEC screen
            r_0 = 10e3
            beta = 5.0 / 3.0
            tec_screen, residual = fit_screen_to_tec(station_names,
                                                     source_names, pp, airmass,
                                                     rr, times, height, order,
                                                     r_0, beta)
            total_resid = np.sum(np.abs(residual))
            if i > 0:
                if total_resid < best_resid:
                    tec_screen_best = tec_screen
                    pp_best = pp
                    height_best = height
                    best_resid = total_resid
            else:
                tec_screen_best = tec_screen
                pp_best = pp
                height_best = height
                best_resid = total_resid
            if len(heights) > 1:
                logging.info(
                    'Total residual for fit: {0}\n'.format(total_resid))

        # Use screen with lowest total residual
        if len(heights) > 1:
            tec_screen = tec_screen_best
            pp = pp_best
            height = height_best
            logging.info(
                'Using height (with lowest total residual) of {0} m'.format(
                    height))

        # Write the results to the output solset
        dirs_out = source_names
        times_out = times
        ants_out = station_names

        # Make output tecscreen table
        outSolset = outSoltabs[indx].split('/')[0]
        outSoltab = outSoltabs[indx].split('/')[1]
        if not outSolset in H.getSolsets().keys():
            solsetTEC = H.makeSolset(outSolset)
            dirs_pos = source_positions
            sourceTable = solsetTEC._f_get_child('source')
            sourceTable.append(zip(*(dirs_out, dirs_pos)))
            ants_pos = station_positions
            antennaTable = solsetTEC._f_get_child('antenna')
            antennaTable.append(zip(*(ants_out, ants_pos)))

        # Store tecscreen values. The residual values are stored in the weights
        # table. Flagged values of the screen have weights set to 0.0.
        vals = tec_screen.transpose([1, 0, 2])
        weights = residual.transpose([1, 0, 2])
        tec_screen_st = H.makeSoltab(outSolset,
                                     'tecscreen',
                                     outSoltab,
                                     axesNames=['dir', 'time', 'ant'],
                                     axesVals=[dirs_out, times_out, ants_out],
                                     vals=vals,
                                     weights=weights)

        # Store beta, r_0, height, and order as attributes of the tecscreen
        # soltab
        tec_screen_st._v_attrs['beta'] = beta
        tec_screen_st._v_attrs['r_0'] = r_0
        tec_screen_st._v_attrs['height'] = height
        tec_screen_st._v_attrs['order'] = order

        # Make output piercepoint table
        tec_screen_solset = tec_screen_st._v_parent._v_name
        H.H.create_carray('/' + tec_screen_solset + '/' +
                          tec_screen_st._v_name,
                          'piercepoint',
                          obj=pp)

        # Add histories
        sw = solWriter(tec_screen_st)
        sw.addHistory('CREATE (by TECSCREEN operation)')
        indx += 1

    return 0
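
Before the screen fit, the TEC cube read from the soltab (axes dir, time, ant) is rearranged into a pierce-point-by-time matrix. A tiny sketch with dummy sizes showing only that reordering (not the fit itself):

import numpy as np

# Dummy TEC cube with the soltab axis order [dir, time, ant]
N_sources, N_times, N_stations = 3, 4, 2
r = np.arange(N_sources * N_times * N_stations, dtype=float).reshape(
    N_sources, N_times, N_stations)

# transpose([0, 2, 1]) gives [dir, ant, time]; the reshape then stacks every
# (source, station) pair into one row, i.e. one pierce point per row.
N_piercepoints = N_sources * N_stations
rr = np.reshape(r.transpose([0, 2, 1]), [N_piercepoints, N_times])
print(rr.shape)  # (6, 4)
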
Example #37
0
elapsed = (time.clock() - start)
logging.info("PARMDB -- " + str(elapsed) + " s.")

start = time.clock()
for i in range(n):
    H.setSelection(dir='pointing', ant='RS*')
    Hrot = H.getValues(retAxesVals=False)
elapsed = (time.clock() - start)
logging.info("H5parm -- " + str(elapsed) + " s.")

#print "Equal?", (Prot == np.squeeze(Hrot)).all()

######################################################
# read+write
logging.info("### Read all rotations for a dir/station and write them back")
Hw = solWriter(H5.getSoltab(solset, 'rotation000'))

start = time.clock()
for i in range(n):
    Prot = P.getValuesGrid('CommonRotationAngle:CS001LBA')
    Prot = {'test' + str(i): Prot['CommonRotationAngle:CS001LBA']}
    P2.addValues(Prot)
    # parmdb write?
elapsed = (time.clock() - start)
logging.info("PARMDB -- " + str(elapsed) + " s.")

start = time.clock()
for i in range(n):
    H.setSelection(dir='pointing', ant='CS001LBA')
    Hrot = H.getValues(retAxesVals=False)
    Hw.setSelection(dir='pointing', ant='CS001LBA')
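
The benchmark above uses time.clock(), which was removed in Python 3.8; the same start/elapsed pattern can be written with time.perf_counter(). A minimal sketch of that timing harness (the timed body is a placeholder, not a LoSoTo call):

import time

def timed(label, func, n=10):
    # Run func() n times and report the elapsed wall-clock time,
    # mirroring the start/elapsed/logging pattern used above.
    start = time.perf_counter()
    for _ in range(n):
        func()
    elapsed = time.perf_counter() - start
    print("%s -- %s s." % (label, elapsed))

timed("noop", lambda: sum(range(1000)))
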
Example #38
0
def run( step, parset, H ):
    """
    Subtract a clock and/or TEC from a phase.
    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )
    soltabsToSub = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "Sub"]), [] )
    ratio = parset.getBool('.'.join(["LoSoTo.Steps", step, "Ratio"]), False )

    for soltab in openSoltabs( H, soltabs ):
        logging.info("--> Working on soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab, useCache = True)

        sfss = [] # sol fetcher to sub tables
        for soltabToSub in soltabsToSub:
            ss, st = soltabToSub.split('/')
            sfs = solFetcher(H.getSoltab(ss, st))
            if sf.getType() != 'phase' and (sfs.getType() == 'tec' or sfs.getType() == 'clock' or sfs.getType() == 'rotationmeasure' or sfs.getType() == 'tec3rd'):
                logging.warning(soltabToSub+' is of type clock/tec/rm and should be subtracted from a phase. Skipping it.')
                continue
            sfss.append( sfs )
            logging.info('Subtracting table: '+soltabToSub)

            # a major speed-up comes from assuming the tables share the same axes, so check that here (this should be the case in almost every situation)
            for axisName in sfs.getAxesNames():
                assert all(sfs.getAxisValues(axisName) == sf.getAxisValues(axisName))
        
        if sf.getType() == 'phase' and (sfs.getType() == 'tec' or sfs.getType() == 'clock' or sfs.getType() == 'rotationmeasure' or sfs.getType() == 'tec3rd' ):
            # the only returned axis is freq; slower but cleaner code
            for vals, weights, coord, selection in sf.getValuesIter(returnAxes='freq', weight = True):

                for sfs in sfss:

                    # restrict to have the same coordinates of phases
                    for i, axisName in enumerate(sfs.getAxesNames()):
                        sfs.selection[i] = selection[sf.getAxesNames().index(axisName)]

                    valsSub = np.squeeze(sfs.getValues(retAxesVals=False, weight=False))
                    weightsSub = np.squeeze(sfs.getValues(retAxesVals=False, weight=True))

                    if sfs.getType() == 'clock':
                        vals -= 2. * np.pi * valsSub * coord['freq']

                    elif sfs.getType() == 'tec':
                        vals -= -8.44797245e9 * valsSub / coord['freq']

                    elif sfs.getType() == 'tec3rd':
                        vals -= - 1.e21 * valsSub / np.power(coord['freq'],3)

                    elif sfs.getType() == 'rotationmeasure':
                        wav = 2.99792458e8/coord['freq']
                        ph = wav * wav * valsSub
                        if coord['pol'] == 'XX' or coord['pol'] == 'RR':
                            vals -= ph
                        elif coord['pol'] == 'YY' or coord['pol'] == 'LL':
                            vals += ph
                    else:
                        vals -= valsSub

                    # flag data that are contaminated by flagged clock/tec data
                    if weightsSub == 0: weights[:] = 0

                sw.selection = selection
                sw.setValues(vals)
                sw.setValues(weights, weight = True)
        else:
            if ratio: sw.setValues((sf.getValues(retAxesVals=False)-sfs.getValues(retAxesVals=False))/sfs.getValues(retAxesVals=False))
            else: sw.setValues(sf.getValues(retAxesVals=False)-sfs.getValues(retAxesVals=False))
            weight = sf.getValues(retAxesVals=False, weight=True)
            weight[sfs.getValues(retAxesVals=False, weight=True) == 0] = 0
            sw.setValues(weight, weight = True)
            
        sw.addHistory('RESIDUALS by subtracting tables '+' '.join(soltabsToSub))
        sw.flush()
        del sf
        del sw
        
    return 0
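
The subtraction above converts each solution type into the phase it contributes at a given frequency. A compact, self-contained restatement of those conversion formulas (the input values are made up; the sign of the rotationmeasure term depends on polarization, as in the code):

import numpy as np

C = 2.99792458e8  # speed of light (m/s)

def to_phase(soltype, val, freq_hz):
    # Phase (rad) contributed at freq_hz by a clock/tec/tec3rd/rotationmeasure
    # value, matching the expressions used in the RESIDUALS operation above.
    if soltype == 'clock':
        return 2. * np.pi * val * freq_hz
    elif soltype == 'tec':
        return -8.44797245e9 * val / freq_hz
    elif soltype == 'tec3rd':
        return -1.e21 * val / np.power(freq_hz, 3)
    elif soltype == 'rotationmeasure':
        wav = C / freq_hz
        return wav * wav * val  # added or subtracted depending on XX/RR vs YY/LL
    return val                  # same-type tables are subtracted directly

freq = 150e6
print(to_phase('clock', 1e-9, freq), to_phase('tec', 0.05, freq))
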
Example #39
0
def run( step, parset, H ):
    """
    Fit phase solutions to obtain TEC values per direction.

    Phase solutions are assumed to be stored in solsets of the H5parm file, one
    solset per band per field. Only phase- or scalarphase-type solution tables
    are used. If direction-independent solutions are found (in addition to the
    direction-dependent ones), they are added, after averaging, to the
    corresponding direction-dependent ones. Phase solutions are automatically
    grouped by field and by band.

    The derived TEC values are stored in the specified output soltab of type
    'tec', with one TEC value per station per direction per solution interval.
    The TEC values are derived using the ``lofar.expion.baselinefitting.fit()``
    function to fit a TEC value to the phases. The term that is minimized
    includes all baselines, so there is no preferred reference station, and the
    residual is computed as the complex number 1.0 - exp(1i phasedifference),
    which is zero when the phase difference is a multiple of 2pi.

    The TEC solution table may be used to derive TEC screens using the
    TECSCREEN operation.
    """
    from losoto.h5parm import solFetcher, solWriter
    import numpy as np
    # Switch to the Agg backend to prevent problems with pylab imports when
    # DISPLAY env. variable is not set
    import os
    if 'DISPLAY' not in os.environ:
        import matplotlib
        matplotlib.use("Agg")
    from pylab import find
    import re
    from .tecscreen import calculate_piercepoints
    from numpy.linalg import norm

    solsets = getParSolsets( step, parset, H )
    ants = getParAxis( step, parset, H, 'ant' )
    pols = getParAxis( step, parset, H, 'pol' )
    dirs = getParAxis( step, parset, H, 'dir' )
    nband_min = int(parset.getString('.'.join(["LoSoTo.Steps", step, "MinBands"]), '8' ))
    niter = int(parset.getString('.'.join(["LoSoTo.Steps", step, "NumIter"]), '1' ))
    dist_cut_m = np.float(parset.getString('.'.join(["LoSoTo.Steps", step, "DistCut"]), '2e3' ))
    nstations_max = int(parset.getString('.'.join(["LoSoTo.Steps", step, "MaxStations"]), '100' ))
    outSolset = parset.getString('.'.join(["LoSoTo.Steps", step, "OutSoltab"]), '' ).split('/')[0]
    outSoltab = parset.getString('.'.join(["LoSoTo.Steps", step, "OutSoltab"]), '' ).split('/')[1]

    # Collect solutions, etc. into arrays for fitting.
    (phases0, phases1, flags, mask, station_names, station_positions,
        source_names, source_positions, freqs, times, pointing,
        soln_type) = collect_solutions(H, dirs=dirs, solsets=solsets)
    if phases0 is None:
        return 1

    # Build list of stations to include
    included_stations = []
    if ants is not None:
        for ant in ants:
            included_stations += [s for s in station_names if re.search(ant, s)]
    else:
        included_stations = station_names
    excluded_stations = [s for s in station_names if s not in included_stations]

    # Select stations to use for first pass
    if len(excluded_stations) > 0:
        logging.info("Excluding stations: {0}".format(np.sort(excluded_stations)))
    mean_position = np.array([np.median(station_positions[:, 0]),
        np.median(station_positions[:, 1]), np.median(station_positions[:, 2])])
    dist = np.sqrt(np.sum((station_positions - mean_position)**2, axis=1))
    dist_sort_ind = np.argsort(dist)
    station_selection1 = find(dist < dist_cut_m)
    station_selection = np.array([i for i in dist_sort_ind
        if i in station_selection1 and station_names[i] not in excluded_stations])
    if len(station_selection) > nstations_max:
        station_selection = station_selection[:nstations_max]
    logging.info("Using normal fitting (no iterative search) for {0} stations "
        "within {1} km of the core:\n{2}".format(len(station_selection),
        dist_cut_m/1000.0, station_names[station_selection]))

    station_selection_orig = station_selection
    for iter in xrange(niter):
        # Loop over groups of nearby pierce points to identify bad stations and
        # remove them from the station_selection.
        nsig = 2.5 # number of sigma for cut
        radius = 2.0 # projected radius in km within which to compare
        if iter > 0:
            logging.info("Identifying bad stations from outlier TEC fits...")
            logging.info("Finding nearby piercepoints (assuming typical screen "
                "height of 200 km...")

            # For each source, find all the pierce points within a given (projected)
            # distance from the median pierce point x,y location. Assume a typical
            # screen height of 200 km.
            pp, airmass = calculate_piercepoints(station_positions[station_selection],
                source_positions[source_selection], times, height = 200e3)
            pp = pp[0, :, :] # use first time [times, stations, dimension]
            x, y, z = station_positions[station_selection][0,:]
            east = np.array([-y, x, 0])
            east = east / norm(east)
            north = np.array([ -x, -y, (x*x + y*y)/z])
            north = north / norm(north)
            up = np.array([x ,y, z])
            up = up / norm(up)
            T = np.concatenate([east[:, np.newaxis], north[:, np.newaxis]], axis=1)
            pp1 = np.dot(pp, T).reshape((len(source_names), len(station_selection), 2))
            for i in xrange(len(source_names)):
                x_median = np.median(pp1[i, :, 0]) / 1000.0
                y_median = np.median(pp1[i, :, 1]) / 1000.0
                dist = np.sqrt( (pp1[i, :, 0] / 1000.0 - x_median)**2 +
                    (pp1[i, :, 1] / 1000.0 - y_median)**2 )
                within_radius = np.where(dist <= radius)[0]
                outside_radius = np.where(dist > radius)
                if len(within_radius) < 10:
                    logging.info("Insufficient number of closely-spaced pierce "
                        "points for bad-station detection. Skipping...")
                    abort_iter = True
                    break
                else:
                    abort_iter = False
                r_median = np.median(r[i, :, within_radius], axis=1)
                r_tot_meddiff = np.zeros(len(station_selection[within_radius]),
                    dtype=float)
                for j in xrange(len(station_selection[within_radius])):
                    r_tot_meddiff[j] = np.sum(np.abs(r[i, :, within_radius[j]] - r_median[j]))
            if abort_iter:
                break
            good_stations = np.where(r_tot_meddiff < nsig * np.median(r_tot_meddiff))
            station_selection = np.append(station_selection[within_radius[good_stations]],
                station_selection[outside_radius])

            new_excluded_stations = [station_names[s] for s in
                station_selection_orig if (s not in station_selection and
                s not in excluded_stations)]
            if len(new_excluded_stations) > 0:
                logging.info('Excluding stations due to TEC solutions that differ '
                    'significantly from mean: {0}'.format(np.sort(new_excluded_stations)))
                logging.info('Updating fit...')
                excluded_stations += new_excluded_stations
                nstations_max -= len(new_excluded_stations)
            else:
                logging.info('No bad stations found.')
                break

        # Fit a TEC value to the phase solutions per source pair.
        # No iterative search for the global minimum is done
        if soln_type == 'scalarphase':
            r, source_selection = fit_tec_per_source_pair(
                phases0[:, station_selection, :, :],
                flags[:, station_selection, :, :],
                mask, freqs, propagate=True, nband_min=nband_min)
            if r is None:
                return 1
        else:
            r0, source_selection = fit_tec_per_source_pair(
                phases0[:, station_selection, :, :],
                flags[:, station_selection, :, :],
                mask, freqs, propagate=True, nband_min=nband_min)
            r1, source_selection = fit_tec_per_source_pair(
                phases1[:, station_selection, :, :],
                flags[:, station_selection, :, :],
                mask, freqs, propagate=True, nband_min=nband_min)
            if r0 is None or r1 is None:
                return 1

            # take the mean of the two polarizations
            r = (r0 + r1) / 2

    # Add stations by searching iteratively for global minimum in solution space
    station_selection, r = add_stations(station_selection, phases0,
        phases1, flags, mask, station_names, station_positions, source_names,
        source_selection, times, freqs, r, nband_min=nband_min,
        soln_type=soln_type, nstations_max=nstations_max, excluded_stations=
        excluded_stations, search_full_tec_range=False)

    # Save TEC values to the output solset
    solset = H.makeSolset(outSolset)
    dirs_out = []
    dirs_pos = []
    for s in source_selection:
        dirs_out.append(source_names[s])
        dirs_pos.append(source_positions[s])
    sourceTable = solset._f_get_child('source')
    sourceTable.append(zip(*(dirs_out, dirs_pos)))

    times_out = times

    ants_out = []
    ants_pos = []
    for s in station_selection:
        ants_out.append(station_names[s])
        ants_pos.append(station_positions[s])
    antennaTable = solset._f_get_child('antenna')
    antennaTable.append(zip(*(ants_out, ants_pos)))

    tf_st = H.makeSoltab(solset._v_name, 'tec', outSoltab,
        axesNames=['dir', 'time', 'ant'], axesVals=[dirs_out, times, ants_out],
        vals=r[source_selection, :, :],
        weights=np.ones_like(r[source_selection, :, :]))

    # Add history
    sw = solWriter(tf_st)
    sw.addHistory('CREATE (by TECFIT operation)')

    return 0
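
The bad-station search above projects pierce points onto a local east/north plane constructed from a station's geocentric position. A stand-alone sketch of just that projection (the coordinates below are hypothetical):

import numpy as np
from numpy.linalg import norm

def local_east_north(x, y, z):
    # Unit vectors spanning the local tangent plane at a geocentric position,
    # built exactly as in the bad-station search above.
    east = np.array([-y, x, 0.0])
    east /= norm(east)
    north = np.array([-x, -y, (x * x + y * y) / z])
    north /= norm(north)
    return np.concatenate([east[:, np.newaxis], north[:, np.newaxis]], axis=1)

pos = np.array([3826577.0, 461022.0, 5064892.0])             # hypothetical ITRF position (m)
T = local_east_north(*pos)                                   # shape (3, 2)
pp = pos + np.array([[100.0, 0.0, 0.0], [0.0, 200.0, 0.0]])  # two dummy pierce points
print(np.dot(pp, T))                                         # east/north coordinates in metres
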
Example #40
0
File: clip.py Project: AHorneffer/losoto
def run( step, parset, H ):

    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )

    axesToClip = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "Axes"]), [] )
    clipLevel = parset.getFloat('.'.join(["LoSoTo.Steps", step, "ClipLevel"]), 0. )
    log = parset.getBool('.'.join(["LoSoTo.Steps", step, "Log"]), True )
    
    if len(axesToClip) < 1:
        logging.error("Please specify axes to clip.")
        return 1
    if clipLevel == 0.:
        logging.error("Please specify factor above/below median at which to clip.")
        return 1

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Clipping soltab: "+soltab._v_name)

        sf = solFetcher(soltab)

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        # some checks
        for i, axis in enumerate(axesToClip[:]):
            if axis not in sf.getAxesNames():
                del axesToClip[i]
                logging.warning('Axis \"'+axis+'\" not found. Ignoring.')

        if sf.getType() != 'amplitude':
            logging.error('CLIP is for "amplitude" tables, not %s.' % sf.getType())
            continue

        sw = solWriter(soltab, useCache=True) # remember to flush()

        before_count=0
        after_count=0
        total=0
        for vals, weights, coord, selection in sf.getValuesIter(returnAxes=axesToClip, weight = True):

            total+=len(vals)
            before_count+=(len(weights)-np.count_nonzero(weights))

            # first find the median and standard deviation
            if (weights == 0).all():
                valmedian = 0
            else:
                if log:
                    valmedian = np.median(np.log10(vals[(weights != 0)]))
                    rms = np.std(np.log10(vals[(weights != 0)]))
                    np.putmask(weights, np.abs(np.log10(vals)-valmedian) > rms * clipLevel, 0)
                else:
                    valmedian = np.median(vals[(weights != 0)])
                    rms = np.std(vals[(weights != 0)])
                    np.putmask(weights, np.abs(vals-valmedian) > rms * clipLevel, 0)
        
            after_count+=(len(weights)-np.count_nonzero(weights))

            # writing back the solutions
            sw.selection = selection
            sw.setValues(weights, weight=True)

        sw.addHistory('CLIP (over %s with %s sigma cut)' % (axesToClip, clipLevel))
        logging.info('Clip, flagged data: %f %% -> %f %%' \
                % (100.*before_count/total, 100.*after_count/total))

        sw.flush()
        
    return 0
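
A minimal, self-contained illustration of the clipping rule applied above (zero the weight of points more than ClipLevel sigma from the median, optionally in log space), using random amplitudes rather than a soltab:

import numpy as np

def clip_weights(vals, weights, clip_level=5.0, log=True):
    # Flag (weight = 0) amplitudes further than clip_level * std from the
    # median, both computed over the currently unflagged points only.
    data = np.log10(vals) if log else vals
    good = weights != 0
    if not good.any():
        return weights
    median = np.median(data[good])
    rms = np.std(data[good])
    np.putmask(weights, np.abs(data - median) > rms * clip_level, 0)
    return weights

rng = np.random.default_rng(0)
amps = rng.lognormal(mean=0.0, sigma=0.1, size=100)
amps[10] = 50.0                                   # an obvious outlier
w = clip_weights(amps, np.ones_like(amps), clip_level=5.0)
print(int((w == 0).sum()), "point(s) flagged")
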
Example #41
0
File: tecjump.py Project: tammojan/losoto
def run( step, parset, H ):

    import scipy.ndimage.filters
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter
    from scipy.optimize import minimize
    import itertools
    from scipy.interpolate import griddata
    import scipy.cluster.vq as vq

    def robust_std(data, sigma=3):
        """
        Calculate standard deviation excluding outliers
        ok with masked arrays
        """
        return np.std(data[np.where(np.abs(data) < sigma * np.std(data))])

    def mask_interp(vals, mask, method='nearest'):
        """
        return interpolated values for masked elements
        """
        this_vals = vals.copy()
        #this_vals[mask] = np.interp(np.where(mask)[0], np.where(~mask)[0], vals[~mask])
        #this_vals[mask] = griddata(np.where(~mask)[0], vals[~mask], np.where(mask)[0], method)

        # griddata can return NaNs with method='nearest', so use scipy.cluster.vq instead
        code, dist = vq.vq(np.where(mask)[0], np.where(~mask)[0])
        this_vals[ np.where(mask)[0] ] = this_vals[code]
        
        return this_vals

    tec_jump_val = 0.019628
    maxsize = 300
    clip = 10 # TEC values more than this many jumps away are flagged

    soltabs = getParSoltabs( step, parset, H )

    for soltab in openSoltabs( H, soltabs ):

        logging.info("Removing TEC jumps from soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab) # remember to flush!

        # TODO: check if it's a Tec table

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        for vals, weights, coord, selection in sf.getValuesIter(returnAxes='time', weight=True):

            # skip all flagged
            if (weights == 0).all(): continue
            # skip reference
            if (np.diff(vals[(weights == 1)]) == 0).all(): continue

            # kill large values
#            weights[abs(vals/tec_jump_val)>clip] = 0

            # interpolate flagged values to get reasonable distances
#            vals = mask_interp(vals, mask=(weights == 0))/tec_jump_val
            # add edges to allow intervals to the borders
#            vals = np.insert(vals, 0, vals[0])
#            vals = np.insert(vals, len(vals), vals[-1])

            vals = np.fmod(vals,tec_jump_val)

#            def find_jumps(d_vals):
#                # jump position finder
#                d_smooth = scipy.ndimage.filters.median_filter( mask_interp(d_vals, mask=(abs(d_vals)>0.8)), 21 )
#                d_vals -= d_smooth
#                jumps = list(np.where(np.abs(d_vals) > 1.)[0])
#                return [0]+jumps+[len(d_vals)-1] # add edges
#
#            class Jump(object):
#                def __init__(self, jumps_idx, med):
#                    self.idx_left = jumps_idx[0]
#                    self.idx_right = jumps_idx[1]
#                    self.jump_left = np.rint(d_vals[self.idx_left])
#                    self.jump_right = np.rint(d_vals[self.idx_right])
#                    self.size = self.idx_right-self.idx_left
#                    self.hight = np.median(vals[self.idx_left+1:self.idx_right+1]-med)
#                    if abs((self.hight-self.jump_left)-med) > abs((self.hight-self.jump_right)-med):
#                        self.closejump = self.jump_right
#                    else:
#                        self.closejump = self.jump_left
#    
#            i = 0
#            while i<len(coord['time']):
#                # get tec[i] - tec[i+1], i.e. the derivative assuming constant timesteps
#                # this is in units of tec_jump_val!
#                d_vals = np.diff(vals)
#                # get jumps idx, idx=n means a jump between val n and n+1
#                jumps_idx = find_jumps(d_vals)
#
#                # get regions
#                med = np.median(vals)
#                jumps = [Jump(jump_idx, med) for jump_idx in zip( jumps_idx[:-1], jumps_idx[1:] )]
#                jumps = [jump for jump in jumps if jump.closejump != 0]
#                jumps = [jump for jump in jumps if jump.size != 0] # prevent bug on edges
#                jumps = [jump for jump in jumps if jump.size < maxsize]
#
#                jumps.sort(key=lambda x: (np.abs(x.size), x.hight), reverse=False) #smallest first
#                #print [(j.hight, j.closejump) for j in jumps]
#
#                plot = False
#                if plot:
#                    import matplotlib.pyplot as plt
#                    fig, ((ax1, ax2, ax3)) = plt.subplots(3, 1, sharex=True)
#                    fig.subplots_adjust(hspace=0)
#                    d_smooth = scipy.ndimage.filters.median_filter( mask_interp(d_vals, mask=(abs(d_vals)>0.8)), 31 )
#                    ax1.plot(d_vals,'k-')
#                    ax2.plot(d_smooth,'k-')
#                    ax3.plot(vals, 'k-')
#                    [ax3.axvline(jump_idx+0.5, color='r', ls=':') for jump_idx in jumps_idx]
#                    ax1.set_ylabel('d_vals')
#                    ax2.set_ylabel('d_vals - smooth')
#                    ax3.set_ylabel('TEC/jump')
#                    ax3.set_xlabel('timestep')
#                    ax1.set_xlim(xmin=-10, xmax=len(d_smooth)+10)
#                    fig.savefig('plots/%stecjump_debug_%03i' % (coord['ant'], i))
#                i+=1
#
#                if len(jumps) == 0: 
#                    break
#
#                # move down the highest to the side closest to the median
#                j = jumps[0]
#                #print j.idx_left, j.idx_right, j.jump_left, j.jump_right, j.hight, j.closejump
#
#                vals[j.idx_left+1:j.idx_right+1] -= j.closejump
#                logging.debug("%s: Number of jumps left: %i - Removing jump: %i - Size %i" % (coord['ant'], len(jumps_idx)-2, j.closejump, j.size))
                
            # re-create proper vals
#            vals = vals[1:-1]*tec_jump_val
            # set back to 0 the values for flagged data
            vals[weights == 0] = 0

            sw.selection = selection
            sw.setValues(vals)
            sw.setValues(weights, weight=True)

        sw.addHistory('TECJUMP')
        del sf
        del sw
    return 0
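
With most of the jump-finding machinery commented out, the active step above simply folds each TEC series modulo the jump size with np.fmod. A toy demonstration of that step on a synthetic series (the jump size is the one hard-coded in the operation):

import numpy as np

tec_jump_val = 0.019628                   # size of one TEC ambiguity step

t = np.arange(20)
tec = 0.005 + 0.002 * np.sin(t / 3.0)     # smooth, positive synthetic TEC
tec_jumped = tec.copy()
tec_jumped[10:] += 2 * tec_jump_val       # introduce a two-step jump half way through

# For positive values, np.fmod folds every sample back into [0, tec_jump_val),
# removing integer multiples of the jump size.
folded = np.fmod(tec_jumped, tec_jump_val)
print(np.allclose(folded, tec))           # True: the jump is gone
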
Example #42
0
def run( step, parset, H ):
    from losoto.h5parm import solFetcher, solWriter
    import numpy as np
    import scipy.optimize

    delaycomplex = lambda d, freq, y: abs(np.cos(d[0]*freq)  - np.cos(y)) + abs(np.sin(d[0]*freq)  - np.sin(y))

    # get involved solsets using local step values or global values or all
    soltabs = getParSoltabs( step, parset, H )

    refAnt = parset.getString('.'.join(["LoSoTo.Steps", step, "RefAnt"]), '' )
    outTab = parset.getString('.'.join(["LoSoTo.Steps", step, "OutTable"]), 'phasediff' )
    maxres = parset.getFloat('.'.join(["LoSoTo.Steps", step, "MaxResidual"]), 1.)

    for t, soltab in enumerate(openSoltabs( H, soltabs )):
        logging.info("--> Working on soltab: "+soltab._v_name)
        sf = solFetcher(soltab)

        # times and ants needs to be complete or selection is much slower
        times = sf.getAxisValues('time')
        ants = sf.getAxisValues('ant')

        # this will make a selection for the getValues() and getValuesIter()
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        # some checks
        solType = sf.getType()
        if solType != 'phase':
           logging.warning("Soltab type of "+soltab._v_name+" is of type "+solType+", should be phase. Ignoring.")
           continue

        if refAnt != '' and not refAnt in ants:
            logging.error('Reference antenna '+refAnt+' not found.')
            return 1
        if refAnt == '': refAnt = ants[0]

        # create new table
        solsetname = soltabs[t].split('/')[0]
        st = H.makeSoltab(solsetname, soltype = sf.getType(), soltab = outTab, axesNames=sf.getAxesNames(), \
                          axesVals=[sf.getAxisValues(axisName) for axisName in sf.getAxesNames()], \
                          vals=sf.getValues(retAxesVals = False), weights=sf.getValues(weight = True, retAxesVals = False), parmdbType=sf.t._v_attrs['parmdb_type'])
        sw = solWriter(st)
        sw.addHistory('Created by CROSSDELAY operation.')
            
        for vals, weights, coord, selection in sf.getValuesIter(returnAxes=['freq','pol','time'], weight=True, reference = refAnt):

            fitdelayguess = 1.e-10 # initial guess; do not use 0, as the minimizer appears unstable with it

            if 'RR' in coord['pol'] and 'LL' in coord['pol']:
                coord1 = np.where(coord['pol'] == 'RR')[0][0]
                coord2 = np.where(coord['pol'] == 'LL')[0][0]
            elif 'XX' in coord['pol'] and 'YY' in coord['pol']:
                coord1 = np.where(coord['pol'] == 'XX')[0][0]
                coord2 = np.where(coord['pol'] == 'YY')[0][0]

            if not coord['ant'] == refAnt:
                logging.debug('Working on ant: '+coord['ant']+'...')

                if (weights == 0.).all() == True:
                    logging.warning('Skipping flagged antenna: '+coord['ant'])
                    weights[:] = 0
                else:

                    for t, time in enumerate(times):

                        # apply flags
                        idx       = ((weights[coord1,:,t] != 0.) & (weights[coord2,:,t] != 0.))
                        freq      = np.copy(coord['freq'])[idx]
                        phase1    = vals[coord1,:,t][idx]
                        phase2    = vals[coord2,:,t][idx]
    
                        if len(freq) < 10:
                            vals[:,:,t] = 0.
                            weights[:,:,t] = 0.
                            logging.debug('Not enough unflagged points for timeslot '+str(t))
                            continue
            
                        if (len(idx) - len(freq))/len(freq) > 1/4.:
                            logging.debug('High number of filtered out data points for the timeslot '+str(t)+': '+str(len(idx) - len(freq)))
            
                        phase_diff  = (phase1 - phase2)
        
                        fitresultdelay, success = scipy.optimize.leastsq(delaycomplex, [fitdelayguess], args=(freq, phase_diff))
                        #if t%100==0: print fitresultdelay
                        # fractional residual
                        residual = np.mean(np.abs(np.mod(fitresultdelay*freq-phase_diff + np.pi, 2.*np.pi) - np.pi))

                        #print "t:", t, "result:", fitresultdelay, "residual:", residual
    
                        if residual < maxres:
                            fitdelayguess = fitresultdelay[0]
                            weight = 1
                        else:       
                            # high residual, flag
                            logging.warning('Bad solution for ant: '+coord['ant']+' (time: '+str(t)+', residual: '+str(residual)+').')
                            weight = 0

                        vals[:,:,t] = 0.
                        vals[coord1,:,t][idx] = fitresultdelay[0]*freq/2.
                        vals[coord2,:,t][idx] = -1.*(fitresultdelay[0]*freq)/2.
                        weights[:,:,t] = 0.
                        weights[coord1,:,t][idx] = weight
                        weights[coord2,:,t][idx] = weight

                        # Debug plot
                        doplot = False
                        if doplot and t%500==0 and coord['ant'] == 'CS004LBA':
                            if not 'matplotlib' in sys.modules:
                                import matplotlib as mpl
                                mpl.rc('font',size =8 )
                                mpl.rc('figure.subplot',left=0.05, bottom=0.05, right=0.95, top=0.95,wspace=0.22, hspace=0.22 )
                                mpl.use("Agg")
                            import matplotlib.pyplot as plt
    
                            fig = plt.figure()
                            fig.subplots_adjust(wspace=0)
                            ax = fig.add_subplot(111)
    
                            # plot rm fit
                            plotdelay = lambda delay, freq: np.mod( delay*freq + np.pi, 2.*np.pi) - np.pi
                            ax.plot(freq, plotdelay(fitresultdelay[0], freq), "-", color='purple')
    
                            ax.plot(freq, np.mod(phase1 + np.pi, 2.*np.pi) - np.pi, 'ob' )
                            ax.plot(freq, np.mod(phase2 + np.pi, 2.*np.pi) - np.pi, 'og' )
                            ax.plot(freq, np.mod(phase_diff + np.pi, 2.*np.pi) - np.pi , '.', color='purple' )                           
         
                            residual = np.mod(plotdelay(fitresultdelay[0], freq)-phase_diff + np.pi,2.*np.pi)-np.pi
                            ax.plot(freq, residual, '.', color='yellow')
            
                            ax.set_xlabel('freq')
                            ax.set_ylabel('phase')
                            ax.set_ylim(ymin=-np.pi, ymax=np.pi)
        
                            logging.warning('Save pic: '+str(t)+'_'+coord['ant']+'.png')
                            plt.savefig(coord['ant']+'_'+str(t)+'.png', bbox_inches='tight')
                            del fig

            sw.setSelection(**coord)
            sw.setValues( vals )
            sw.setValues( weights, weight=True )

        del st
        del sw        
        del sf
    return 0
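
The delay fit above minimizes a wrap-insensitive misfit in cos/sin space with scipy.optimize.leastsq. A small, self-contained sketch of the same idea on synthetic data (the true delay value is made up):

import numpy as np
import scipy.optimize

# Wrap-insensitive misfit between a phase-slope model d[0]*freq and phases y,
# as used by the fit above.
delaycomplex = lambda d, freq, y: (np.abs(np.cos(d[0] * freq) - np.cos(y)) +
                                   np.abs(np.sin(d[0] * freq) - np.sin(y)))

freq = np.linspace(120e6, 160e6, 64)
true_delay = 2.3e-9                                                  # hypothetical differential delay
phase_diff = np.mod(true_delay * freq + np.pi, 2. * np.pi) - np.pi   # wrapped synthetic data

fit, _ = scipy.optimize.leastsq(delaycomplex, [1.e-10], args=(freq, phase_diff))
print(fit[0])   # should be close to 2.3e-9
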
Example #43
0
File: flag.py Project: twshimwell/losoto
def run(step, parset, H):

    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs(step, parset, H)

    #check_parset('Axes','MaxCycles','MaxRms','Order','Replace','PreFlagZeros')
    axesToFlag = parset.getStringVector(
        '.'.join(["LoSoTo.Steps", step, "Axes"]), 'time')
    maxCycles = parset.getInt('.'.join(["LoSoTo.Steps", step, "MaxCycles"]), 5)
    maxRms = parset.getFloat('.'.join(["LoSoTo.Steps", step, "MaxRms"]), 5.)
    fixRms = parset.getFloat('.'.join(["LoSoTo.Steps", step, "FixRms"]), 0)
    order = parset.getIntVector('.'.join(["LoSoTo.Steps", step, "Order"]), 3)
    replace = parset.getBool('.'.join(["LoSoTo.Steps", step, "Replace"]),
                             False)
    preflagzeros = parset.getBool(
        '.'.join(["LoSoTo.Steps", step, "PreFlagZeros"]), False)
    mode = parset.getString('.'.join(["LoSoTo.Steps", step, "Mode"]), 'smooth')
    ref = parset.getString('.'.join(["LoSoTo.Steps", step, "Reference"]), '')
    ncpu = parset.getInt('.'.join(["LoSoTo.Ncpu"]), 1)

    if ref == '': ref = None

    if axesToFlag == []:
        logging.error("Please specify axis to flag. It must be a single one.")
        return 1

    if len(axesToFlag) != len(order):
        logging.error("Axes and Order must both contain either 1 or 2 values.")
        return 1

    if len(order) == 1: order = order[0]
    elif len(order) == 2: order = tuple(order)

    mode = mode.lower()
    if mode != 'smooth' and mode != 'poly' and mode != 'spline':
        logging.error('Mode must be smooth, poly or spline')
        return 1

    # start processes for multi-thread
    mpm = multiprocManager(ncpu, flag)

    for soltab in openSoltabs(H, soltabs):

        logging.info("Flagging soltab: " + soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab, useCache=True)  # remember to flush!

        # axis selection
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        sf.setSelection(**userSel)

        for axisToFlag in axesToFlag:
            if axisToFlag not in sf.getAxesNames():
                logging.error('Axis \"' + axisToFlag + '\" not found.')
                return 1

        # reorder axesToFlag as axes in the table
        axesToFlag_orig = axesToFlag
        axesToFlag = [
            coord for coord in sf.getAxesNames() if coord in axesToFlag
        ]
        if type(order) is int: order = [order]
        if axesToFlag_orig != axesToFlag:
            order = order[::-1]  # reverse order if we changed axesToFlag

        solType = sf.getType()

        # fill the queue (note that sf and sw cannot be put into a queue since they have file references)
        for vals, weights, coord, selection in sf.getValuesIter(
                returnAxes=axesToFlag, weight=True, reference=ref):
            mpm.put([
                vals, weights, coord, solType, order, mode, preflagzeros,
                maxCycles, fixRms, maxRms, replace, axesToFlag, selection
            ])
            #v, w, sel = flag(vals, weights, coord, solType, order, mode, preflagzeros, maxCycles, fixRms, maxRms, replace, axesToFlag, selection)

        mpm.wait()

        for v, w, sel in mpm.get():
            sw.selection = sel
            if replace:
                # rewrite solutions (flagged values are overwritten)
                sw.setValues(v, weight=False)
            else:
                sw.setValues(w, weight=True)

        sw.flush()
        sw.addHistory('FLAG (over %s with %s sigma cut)' %
                      (axesToFlag, maxRms))

        del sw
        del sf
        del soltab

    return 0
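
multiprocManager is a LoSoTo helper; the fill-the-queue / wait / collect pattern above can be approximated with the standard library. A rough sketch of that pattern (flag_chunk is a stand-in, not the real flag() worker):

import multiprocessing

def flag_chunk(args):
    # Stand-in worker: receives one (vals, weights, selection) work item and
    # returns the updated weights together with its selection.
    vals, weights, selection = args
    return weights, selection

if __name__ == '__main__':
    work = [([1.0, 2.0], [1, 1], i) for i in range(4)]   # dummy work items
    with multiprocessing.Pool(processes=2) as pool:
        for weights, selection in pool.map(flag_chunk, work):
            pass  # the operation above writes these back via sw.selection / sw.setValues
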
Example #44
0
def run( step, parset, H ):
    """
    Estimate Faraday rotation (rotation measure) from phase solutions.

    The fitted values are stored in a new solution table of type 'rotationmeasure'.
    """
    from losoto.h5parm import solFetcher, solWriter
    import numpy as np
    import scipy.optimize

    rmwavcomplex = lambda RM, wav, y: abs(np.cos(2.*RM[0]*wav*wav)  - np.cos(y)) + abs(np.sin(2.*RM[0]*wav*wav)  - np.sin(y))
    c = 2.99792458e8

    # get involved solsets using local step values or global values or all
    soltabs = getParSoltabs( step, parset, H )

    refAnt = parset.getString('.'.join(["LoSoTo.Steps", step, "RefAnt"]), '' )
    ncpu = parset.getInt('.'.join(["LoSoTo.Ncpu"]), 1 )

    for t, soltab in enumerate(openSoltabs( H, soltabs )):
        logging.info("--> Working on soltab: "+soltab._v_name)
        sf = solFetcher(soltab)

        # times and ants needs to be complete or selection is much slower
        times = sf.getAxisValues('time')
        ants = sf.getAxisValues('ant')

        # this will make a selection for the getValues() and getValuesIter()
        userSel = {}
        for axis in sf.getAxesNames():
            userSel[axis] = getParAxis( step, parset, H, axis )
        sf.setSelection(**userSel)

        # some checks
        solType = sf.getType()
        if solType != 'phase':
           logging.warning("Soltab type of "+soltab._v_name+" is of type "+solType+", should be phase. Ignoring.")
           continue

        if refAnt != '' and not refAnt in ants:
            logging.error('Reference antenna '+refAnt+' not found.')
            return 1
        if refAnt == '': refAnt = ants[0]

        solsetname = soltabs[t].split('/')[0]
        st = H.makeSoltab(solsetname, 'rotationmeasure',
                                 axesNames=['ant','time'], axesVals=[ants, times],
                                 vals=np.zeros((len(ants),len(times))),
                                 weights=np.ones((len(ants),len(times))))
        sw = solWriter(st)
        sw.addHistory('Created by FARADAY operation.')
            
        for vals, weights, coord, selection in sf.getValuesIter(returnAxes=['freq','pol','time'], weight=True, reference = refAnt):

            if len(coord['freq']) < 10:
                logging.error('Faraday rotation estimation needs at least 10 frequency channels, preferably distributed over a wide range.')
                return 1

            fitrm = np.zeros(len(times))
            fitweights = np.ones(len(times))
            fitrmguess = 0 # good guess

            if 'RR' in coord['pol'] and 'LL' in coord['pol']:
                coord_rr = np.where(coord['pol'] == 'RR')[0][0]
                coord_ll = np.where(coord['pol'] == 'LL')[0][0]
            elif 'XX' in coord['pol'] and 'YY' in coord['pol']:
                logging.warning('Linear polarization detected, LoSoTo assumes XX->RR and YY->LL.')
                coord_rr = np.where(coord['pol'] == 'XX')[0][0]
                coord_ll = np.where(coord['pol'] == 'YY')[0][0]
            else:
                logging.error("Cannot proceed with Faraday estimation with polarizations: "+str(coord['pol']))
                return 1

            if not coord['ant'] == refAnt:
                logging.debug('Working on ant: '+coord['ant']+'...')

                for t, time in enumerate(times):

                    # apply flags
                    idx       = ((weights[0,:,t] != 0.) & (weights[1,:,t] != 0.))
                    freq      = np.copy(coord['freq'])[idx]
                    phase_rr  = vals[coord_rr,:,t][idx]
                    phase_ll  = vals[coord_ll,:,t][idx]

                    if (len(weights[0,:,t]) - len(freq))/len(weights[0,:,t]) > 1/4.:
                        logging.debug('High number of filtered out data points for the timeslot '+str(t)+': '+str(len(weights[0,:,t]) - len(freq)))
        
                    if len(freq) < 10:
                        fitweights[t] = 0
                        logging.warning('No valid data found for Faraday fitting for antenna: '+coord['ant'])
                        continue
        
                    # RR-LL to be consistent with BBS/NDPPP
                    phase_diff  = (phase_rr - phase_ll)      # do not divide by 2 here, otherwise phase-wrap problems appear; the factor is handled later
                    wav = c/freq
    
                    fitresultrm_wav, success = scipy.optimize.leastsq(rmwavcomplex, [fitrmguess], args=(wav, phase_diff))
                    # fractional residual
                    residual = np.mean(np.abs(np.mod((2.*fitresultrm_wav*wav*wav)-phase_diff + np.pi, 2.*np.pi) - np.pi))

#                    print "t:", t, "result:", fitresultrm_wav, "residual:", residual

                    if residual < 0.5:
                        fitrmguess = fitresultrm_wav[0]
                        weight = 1
                    else:       
                        # high residual, flag
                        logging.warning('Bad solution for ant: '+coord['ant']+' (time: '+str(t)+', residual: '+str(residual)+').')
                        weight = 0

                    # Debug plot
                    doplot = False
                    if doplot and coord['ant'] == 'RS310LBA' and t%10==0:
                        print "Plotting"
                        if not 'matplotlib' in sys.modules:
                            import matplotlib as mpl
                            mpl.rc('font',size =8 )
                            mpl.rc('figure.subplot',left=0.05, bottom=0.05, right=0.95, top=0.95,wspace=0.22, hspace=0.22 )
                            mpl.use("Agg")
                        import matplotlib.pyplot as plt

                        fig = plt.figure()
                        fig.subplots_adjust(wspace=0)
                        ax = fig.add_subplot(111)

                        # plot rm fit
                        plotrm = lambda RM, wav: np.mod( (2.*RM*wav*wav) + np.pi, 2.*np.pi) - np.pi # notice the factor of 2
                        ax.plot(freq, plotrm(fitresultrm_wav, c/freq[:]), "-", color='purple')

                        ax.plot(freq, np.mod(phase_rr + np.pi, 2.*np.pi) - np.pi, 'ob' )
                        ax.plot(freq, np.mod(phase_ll + np.pi, 2.*np.pi) - np.pi, 'og' )
                        ax.plot(freq, np.mod(phase_diff + np.pi, 2.*np.pi) - np.pi , '.', color='purple' )                           
     
                        residual = np.mod(plotrm(fitresultrm_wav, c/freq[:])-phase_diff+np.pi,2.*np.pi)-np.pi
                        ax.plot(freq, residual, '.', color='yellow')
        
                        ax.set_xlabel('freq')
                        ax.set_ylabel('phase')
                        ax.set_ylim(ymin=-np.pi, ymax=np.pi)
    
                        logging.warning('Save pic: '+str(t)+'_'+coord['ant']+'.png')
                        plt.savefig(str(t)+'_'+coord['ant']+'.png', bbox_inches='tight')
                        del fig

                    fitrm[t] = fitresultrm_wav[0]
                    fitweights[t] = weight

            sw.setSelection(ant=coord['ant'], time=coord['time'])
            sw.setValues( np.expand_dims(fitrm, axis=1) )
            sw.setValues( np.expand_dims(fitweights, axis=1), weight=True )

        del st
        del sw        
        del sf
    return 0
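
As with the delay fit earlier, the Faraday fit minimizes a wrap-insensitive misfit, now against a 2*RM*lambda^2 model. A stand-alone sketch on synthetic data (the rotation measure value is made up):

import numpy as np
import scipy.optimize

c = 2.99792458e8
rmwavcomplex = lambda RM, wav, y: (np.abs(np.cos(2. * RM[0] * wav * wav) - np.cos(y)) +
                                   np.abs(np.sin(2. * RM[0] * wav * wav) - np.sin(y)))

freq = np.linspace(120e6, 160e6, 64)
wav = c / freq
true_rm = 0.1                                                               # hypothetical RM (rad/m^2)
phase_rr_ll = np.mod(2. * true_rm * wav * wav + np.pi, 2. * np.pi) - np.pi  # wrapped RR-LL phase

fit, _ = scipy.optimize.leastsq(rmwavcomplex, [0.0], args=(wav, phase_rr_ll))
print(fit[0])   # should be close to 0.1
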
Example #45
0
def run( step, parset, H ):
    """
    Subtract a clock and/or TEC from a phase.
    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )
    soltabsToSub = parset.getStringVector('.'.join(["LoSoTo.Steps", step, "Sub"]), [] )
    ratio = parset.getBool('.'.join(["LoSoTo.Steps", step, "Ratio"]), False )

    for soltab in openSoltabs( H, soltabs ):
        logging.info("--> Working on soltab: "+soltab._v_name)

        sf = solFetcher(soltab)
        sw = solWriter(soltab, useCache = True)

        sfss = [] # sol fetcher to sub tables
        for soltabToSub in soltabsToSub:
            ss, st = soltabToSub.split('/')
            sfs = solFetcher(H.getSoltab(ss, st))
            if sf.getType() != 'phase' and sfs.getType() in ('tec', 'clock', 'rotationmeasure', 'tec3rd'):
                logging.warning(soltabToSub+' is of type clock/tec/rm and should be subtracted from a phase. Skipping it.')
                continue
            sfss.append( sfs )
            logging.info('Subtracting table: '+soltabToSub)

            # a major speed-up comes from assuming the tables share the same axes; check that here (it holds in almost all cases)
            for axisName in sfs.getAxesNames():
                assert all(sfs.getAxisValues(axisName) == sf.getAxisValues(axisName))
        
        if sf.getType() == 'phase' and sfs.getType() in ('tec', 'clock', 'rotationmeasure', 'tec3rd'):
            # the only returned axis is freq; slower, but cleaner code
            for vals, weights, coord, selection in sf.getValuesIter(returnAxes='freq', weight = True):

                for sfs in sfss:

                    # restrict the sub-table selection to the same coordinates as the current phase slice
                    for i, axisName in enumerate(sfs.getAxesNames()):
                        sfs.selection[i] = selection[sf.getAxesNames().index(axisName)]

                    valsSub = np.squeeze(sfs.getValues(retAxesVals=False, weight=False))
                    weightsSub = np.squeeze(sfs.getValues(retAxesVals=False, weight=True))

                    if sfs.getType() == 'clock':
                        vals -= 2. * np.pi * valsSub * coord['freq']

                    elif sfs.getType() == 'tec':
                        vals -= -8.44797245e9 * valsSub / coord['freq']

                    elif sfs.getType() == 'tec3rd':
                        vals -= - 1.e21 * valsSub / np.power(coord['freq'],3)

                    elif sfs.getType() == 'rotationmeasure':
                        wav = 2.99792458e8/coord['freq']
                        ph = wav * wav * valsSub
                        if coord['pol'] == 'XX':
                            vals += ph
                        elif coord['pol'] == 'YY':
                            vals -= ph
                    else:
                        vals -= valsSub

                    # flag data that are contaminated by flagged clock/tec data
                    if weightsSub == 0: weights[:] = 0

                sw.selection = selection
                sw.setValues(vals)
                sw.setValues(weights, weight = True)
        else:
            # non-phase tables: subtract the last table in Sub (or, with Ratio=True, store the fractional difference)
            if ratio:
                sw.setValues((sf.getValues(retAxesVals=False) - sfs.getValues(retAxesVals=False)) / sfs.getValues(retAxesVals=False))
            else:
                sw.setValues(sf.getValues(retAxesVals=False) - sfs.getValues(retAxesVals=False))
            weight = sf.getValues(retAxesVals=False, weight=True)
            weight[sfs.getValues(retAxesVals=False, weight=True) == 0] = 0
            sw.setValues(weight, weight=True)
            
        sw.addHistory('RESIDUALS by subtracting tables '+' '.join(soltabsToSub))
        sw.flush()
        del sf
        del sw
        
    return 0
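
For reference, the conversions that this step applies when subtracting clock, TEC, third-order TEC and rotation-measure tables from a phase table can be collected into one helper. This is only a sketch restating the formulas used in the code above; the function and argument names are illustrative and not part of LoSoTo's API.

# Sketch of the phase terms subtracted above; names are illustrative only.
import numpy as np

C = 2.99792458e8  # speed of light [m/s]

def phase_term(effect_type, val, freq, pol=None):
    """Phase term [rad] that gets subtracted from the phase solutions at frequency freq [Hz]."""
    if effect_type == 'clock':
        return 2. * np.pi * val * freq       # val: delay [s]
    if effect_type == 'tec':
        return -8.44797245e9 * val / freq    # val: TEC [TECU]
    if effect_type == 'tec3rd':
        return -1.e21 * val / freq**3        # third-order ionospheric term
    if effect_type == 'rotationmeasure':
        wav = C / freq                       # val: RM [rad/m^2]
        # enters with opposite sign on the two polarisations, as in the step above
        if pol == 'XX':
            return -wav * wav * val
        elif pol == 'YY':
            return wav * wav * val
        raise ValueError('rotationmeasure needs pol XX or YY')
    raise ValueError('unknown effect type: %s' % effect_type)

# e.g. removing a 10 ns clock error from a phase measured at 150 MHz:
# corrected = phase - phase_term('clock', 10e-9, 150e6)
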
Example #46
def run(step, parset, H):
    """
    Separate phase solutions into Clock and TEC.

    Phase solutions are assumed to be stored in solsets of the H5parm file, one
    solset per field.

    The Clock and TEC values are stored in the specified output soltab with type 'clock' and 'tec'.

    """
    import numpy as np
    from losoto.h5parm import solFetcher, solWriter

    # get involved solsets using local step values or global values or all
    soltabs = getParSoltabs(step, parset, H)

    flagBadChannels = parset.getBool(
        '.'.join(["LoSoTo.Steps", step, "FlagBadChannels"]), True)
    flagCut = parset.getFloat('.'.join(["LoSoTo.Steps", step, "FlagCut"]), 5.)
    chi2cut = parset.getFloat('.'.join(["LoSoTo.Steps", step, "Chi2cut"]),
                              3000.)
    combinePol = parset.getBool('.'.join(["LoSoTo.Steps", step, "CombinePol"]),
                                False)
    #fitOffset = parset.getBool('.'.join(["LoSoTo.Steps", step, "FitOffset"]), False )
    removePhaseWraps = parset.getBool(
        '.'.join(["LoSoTo.Steps", step, "RemovePhaseWraps"]), True)
    fit3rdorder = parset.getBool(
        '.'.join(["LoSoTo.Steps", step, "Fit3rdOrder"]), False)
    circular = parset.getBool('.'.join(["LoSoTo.Steps", step, "Circular"]),
                              False)
    reverse = parset.getBool('.'.join(["LoSoTo.Steps", step, "Reverse"]),
                             False)

    # do something on every soltab (use the openSoltab LoSoTo function)
    #for soltab in openSoltabs( H, soltabs ):
    for soltabname in soltabs:
        solsetname = soltabname.split('/')[0]
        soltab = H.getSoltab(solset=solsetname,
                             soltab=soltabname.split('/')[1])
        logging.info("--> Working on soltab: " + soltab._v_name)
        t = solFetcher(soltab)
        tw = solWriter(soltab)

        # some checks
        solType = t.getType()
        if solType != 'phase':
            logging.warning("Soltab type of " + soltab._v_name + " is: " +
                            solType + " should be phase. Ignoring.")
            continue

        # this will make a selection for the getValues() and getValuesIter()
        userSel = {}
        for axis in t.getAxesNames():
            userSel[axis] = getParAxis(step, parset, H, axis)
        t.setSelection(**userSel)

        # Collect station properties
        station_dict = H.getAnt(solsetname)
        stations = t.getAxisValues('ant')
        station_positions = np.zeros((len(stations), 3), dtype=float)
        for i, station_name in enumerate(stations):
            station_positions[i, 0] = station_dict[station_name][0]
            station_positions[i, 1] = station_dict[station_name][1]
            station_positions[i, 2] = station_dict[station_name][2]

        returnAxes = ['ant', 'freq', 'pol', 'time']
        for vals, flags, coord, selection in t.getValuesIter(
                returnAxes=returnAxes, weight=True):

            if len(coord['ant']) < 2:
                logging.error(
                    'Clock/TEC separation needs at least 2 antennas selected.')
                return 1
            if len(coord['freq']) < 10:
                logging.error(
                    'Clock/TEC separation needs at least 10 frequency channels, preferably distributed over a wide range'
                )
                return 1

            freqs = coord['freq']
            stations = coord['ant']
            times = coord['time']

            # get axes index
            axes = [i for i in t.getAxesNames() if i in returnAxes]

            # reverse time axes
            if reverse:
                vals = np.swapaxes(
                    np.swapaxes(vals, 0, axes.index('time'))[::-1], 0,
                    axes.index('time'))
                flags = np.swapaxes(
                    np.swapaxes(flags, 0, axes.index('time'))[::-1], 0,
                    axes.index('time'))

            result = doFit(vals, flags == 0, freqs, stations, station_positions, axes,
                           flagBadChannels=flagBadChannels, flagcut=flagCut, chi2cut=chi2cut,
                           combine_pol=combinePol, removePhaseWraps=removePhaseWraps,
                           fit3rdorder=fit3rdorder, circular=circular)
            if fit3rdorder:
                clock, tec, offset, tec3rd = result
                if reverse:
                    clock = clock[::-1, :]
                    tec = tec[::-1, :]
                    tec3rd = tec3rd[::-1, :]
            else:
                clock, tec, offset = result
                if reverse:
                    clock = clock[::-1, :]
                    tec = tec[::-1, :]

            weights = tec > -5
            tec[np.logical_not(weights)] = 0
            clock[np.logical_not(weights)] = 0
            weights = np.float16(weights)

            if combinePol:
                tf_st = H.makeSoltab(solsetname,
                                     'tec',
                                     axesNames=['time', 'ant'],
                                     axesVals=[times, stations],
                                     vals=tec[:, :, 0],
                                     weights=weights[:, :, 0])
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname,
                                     'clock',
                                     axesNames=['time', 'ant'],
                                     axesVals=[times, stations],
                                     vals=clock[:, :, 0] * 1e-9,
                                     weights=weights[:, :, 0])
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname,
                                     'phase_offset',
                                     axesNames=['ant'],
                                     axesVals=[stations],
                                     vals=offset[:, 0],
                                     weights=np.ones_like(offset[:, 0],
                                                          dtype=np.float16))
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                if fit3rdorder:
                    tf_st = H.makeSoltab(solsetname,
                                         'tec3rd',
                                         axesNames=['time', 'ant'],
                                         axesVals=[times, stations],
                                         vals=tec3rd[:, :, 0],
                                         weights=weights[:, :, 0])
                    sw = solWriter(tf_st)
            else:
                tf_st = H.makeSoltab(solsetname,
                                     'tec',
                                     axesNames=['time', 'ant', 'pol'],
                                     axesVals=[times, stations, ['XX', 'YY']],
                                     vals=tec,
                                     weights=weights)
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname,
                                     'clock',
                                     axesNames=['time', 'ant', 'pol'],
                                     axesVals=[times, stations, ['XX', 'YY']],
                                     vals=clock * 1e-9,
                                     weights=weights)
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                tf_st = H.makeSoltab(solsetname,
                                     'phase_offset',
                                     axesNames=['ant', 'pol'],
                                     axesVals=[stations, ['XX', 'YY']],
                                     vals=offset,
                                     weights=np.ones_like(offset,
                                                          dtype=np.float16))
                sw = solWriter(tf_st)
                sw.addHistory('CREATE (by CLOCKTECFIT operation)')
                if fit3rdorder:
                    tf_st = H.makeSoltab(
                        solsetname,
                        'tec3rd',
                        axesNames=['time', 'ant', 'pol'],
                        axesVals=[times, stations, ['XX', 'YY']],
                        vals=tec3rd,
                        weights=weights)
                    sw = solWriter(tf_st)
    return 0
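
The heavy lifting above is done by doFit(), which decomposes the phases into a clock delay and a TEC value per station and time slot (plus a constant offset and, optionally, a third-order term). Below is a minimal sketch of the underlying model, fitted with an ordinary linear least squares on synthetic, unwrapped phases; the real doFit() additionally handles phase wraps, flagging, polarisations and station positions, and all values and names here are illustrative.

# Sketch of the clock/TEC phase model solved per station and time slot:
#   phase(nu) ~ 2*pi*nu*clock - 8.44797245e9*TEC/nu + offset
# fitted here with a plain linear least squares on synthetic data.
import numpy as np

freq = np.linspace(120e6, 170e6, 40)     # hypothetical HBA frequencies [Hz]
clock_true, tec_true, offset_true = 5e-9, 0.02, 0.3
phase = 2.*np.pi*freq*clock_true - 8.44797245e9*tec_true/freq + offset_true

# design matrix: one column each for clock, TEC and the constant offset
A = np.column_stack([2.*np.pi*freq, -8.44797245e9/freq, np.ones_like(freq)])
clock_fit, tec_fit, offset_fit = np.linalg.lstsq(A, phase, rcond=None)[0]
print('clock %.2e s, TEC %.4f TECU, offset %.2f rad' % (clock_fit, tec_fit, offset_fit))
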
Example #47
elapsed = (time.clock() - start)
logging.info("PARMDB -- "+str(elapsed)+" s.")

start = time.clock()
for i in xrange(n):
    H.setSelection(dir='pointing', ant='RS*')
    Hrot = H.getValues(retAxesVals = False)
elapsed = (time.clock() - start)
logging.info("H5parm -- "+str(elapsed)+" s.")

#print "Equal?", (Prot == np.squeeze(Hrot)).all()

######################################################
# read+write
logging.info("### Read all rotations for a dir/station and write them back")
Hw = solWriter(H5.getSoltab(solset,'rotation000'))

start = time.clock()
for i in xrange(n):
    Prot = P.getValuesGrid('CommonRotationAngle:CS001LBA')
    Prot = {'test'+str(i):Prot['CommonRotationAngle:CS001LBA']}
    P2.addValues(Prot)
    # parmdb write?
elapsed = (time.clock() - start)
logging.info("PARMDB -- "+str(elapsed)+" s.")

start = time.clock()
for i in xrange(n):
    H.setSelection(dir='pointing', ant='CS001LBA')
    Hrot = H.getValues(retAxesVals = False)
    Hw.setSelection(dir='pointing', ant='CS001LBA')
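
A small portability note on the timing pattern used in this benchmark: time.clock() was removed in Python 3.8, so under Python 3 the same loop would be timed with time.perf_counter(). A minimal sketch follows, with the read/write calls elided and an illustrative repetition count.

# Python 3 variant of the timing pattern above; time.clock() is gone in 3.8+.
import time

n = 100                               # repetition count, illustrative
start = time.perf_counter()
for i in range(n):
    pass                              # ... H5parm/parmdb reads and writes go here ...
elapsed = time.perf_counter() - start
print("H5parm -- " + str(elapsed) + " s.")
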
Example #48
logging.info("Create soltab (using default name)")
H5.makeSoltab(ss,
              'amplitude',
              axesNames=['axis1', 'axis2', 'axis3'],
              axesVals=axesVals,
              vals=vals,
              weights=vals)
logging.info('Get a soltab object')
st = H5.getSoltab(ss, 'stTest')
logging.info('Get all soltabs:')
print H5.getSoltabs(ss)

print "###########################################"
logging.info('### solFetcher/solWriter - General')
Hsf = solFetcher(st)
Hsw = solWriter(st)
logging.info('Get solution Type (exp: amplitude)')
print Hsf.getType()
logging.info('Get Axes Names')
print Hsf.getAxesNames()
logging.info('Get Axis1 Len (exp: 4)')
print Hsf.getAxisLen('axis1')
logging.info('Get Axis1 Type (exp: str)')
print Hsf.getAxisType('axis1')
logging.info('Get Axis2 Type (exp: float)')
print Hsf.getAxisType('axis2')
logging.info('Get Axis1 Values (exp: a,b,c,d)')
print Hsf.getAxisValues('axis1')
logging.info('Set new axes values')
Hsw.setAxisValues('axis1', ['e', 'f', 'g', 'h'])
logging.info('Get new Axis1 Values (exp: e,f,g,h)')