Example #1
def load(dataset, indices, vis, weights, flags, err):
    """Load data from lazy indexers into existing storage.
    This is optimised for the MVF v4 case where we can use dask directly
    to eliminate one copy, and also load vis, flags and weights in parallel.
    In older formats it causes an extra copy.
    Parameters
    ----------
    dataset : :class:`katdal.DataSet`
        Input dataset, possibly with an existing selection
    indices : tuple
        Slice expression for subsetting the dataset
    vis, flags : array-like
        Outputs, which must have the correct shape and type
    """

    t_min = indices[0].start
    t_max = indices[0].stop
    in_time_slices = [
        slice(ts, min(ts + CHUNK_SIZE, t_max))
        for ts in range(t_min, t_max, CHUNK_SIZE)
    ]
    for in_ts in in_time_slices:
        out_ts = slice(in_ts.start - t_min, in_ts.stop - t_min)
        out_vis = vis[out_ts]
        out_weights = weights[out_ts]
        out_flags = flags[out_ts]
        for i in range(NUM_RETRIES):
            try:
                if isinstance(dataset.vis, DaskLazyIndexer):
                    DaskLazyIndexer.get(
                        [dataset.vis, dataset.weights, dataset.flags],
                        in_ts,
                        out=[out_vis, out_weights, out_flags])
                else:
                    out_vis[:] = dataset.vis[in_ts]
                    out_weights[:] = dataset.weights[in_ts]
                    out_flags[:] = dataset.flags[in_ts]
                break
            except (StoreUnavailable, socket.timeout):
                msg = 'Timeout when reading dumps %d to %d. Try %d/%d....' % (
                    out_ts.start + 1, out_ts.stop, i + 1, NUM_RETRIES)
                OErr.PLog(err, OErr.Warn, msg)
                OErr.printErr(err)
                print(msg)
        else:
            # All retries timed out: flag the affected dumps and warn
            msg = 'Too many timeouts, flagging dumps %d to %d' % (
                out_ts.start + 1, out_ts.stop)
            OErr.PLog(err, OErr.Warn, msg)
            OErr.printErr(err)
            print(msg)
            flags[out_ts] = True
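
# A minimal usage sketch (not from the original source). Assumes `dataset` is
# an open katdal.DataSet with shape (dumps, channels, corrprods), `err` is an
# initialized Obit OErr stack, and that the dtypes below are acceptable for
# the rest of the pipeline (the dtype choices are assumptions).
import numpy as np

n_dumps, n_chans, n_cp = dataset.shape
vis = np.empty((n_dumps, n_chans, n_cp), dtype=np.complex64)
weights = np.empty((n_dumps, n_chans, n_cp), dtype=np.float32)
flags = np.zeros((n_dumps, n_chans, n_cp), dtype=bool)
load(dataset, (slice(0, n_dumps),), vis, weights, flags, err)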
Example #2
def PWebFetch(url, args, outfile, err):
    """ 
    Generic Fetch a web product

    Sends a http post request to url and sames response to outfile
    Throws exception on error
    * url     = where the request to be sent,
                e.g."https://www.cv.nrao.edu/cgi-bin/postage.pl"
    * args    = dict or arguments, e.g. {"arg1":"arg1"}
    * outfile = Name of the output file, absolute path or relative to CWD
                None => use name from server
    * err     = Python Obit Error/message stack
    """
    ################################################################
    # Package parameters
    encoded_args = six.moves.urllib.parse.urlencode(args)
    NVSShost = "https://www.cv.nrao.edu/cgi-bin/postage.pl"
    # fetch
    try:
        request = six.moves.urllib.request.Request(url)
        response = six.moves.urllib.request.urlopen(
            request, encoded_args.encode('utf-8'))
        data = response.read()
    except Exception as exception:
        print(exception)
        OErr.PLog(err, OErr.Error, "Request from server failed")
        OErr.printErrMsg(err)
    if outfile is None:  # Name from server?
        outfile = os.path.basename(response.headers['URI'])
    with open(outfile, "wb") as fd:
        fd.write(data)
    # Info
    print("Response code =", response.code, response.msg)
    print("Response type =", response.headers["Content-Type"])
Example #3
def PWebFetch(url, args, outfile, err):
    """ 
    Generic Fetch a web product

    Sends a http post request to url and sames response to outfile
    Throws exception on error
    * url     = where the request to be sent,
                e.g."https://www.cv.nrao.edu/cgi-bin/postage.pl"
    * args    = dict or arguments, e.g. {"arg1":"arg1"}
    * outfile = Name of the output file, absolute path or relative to CWD
                None => use name from server
    * err     = Python Obit Error/message stack
    """
    ################################################################
    # Package parameters
    encoded_args = urllib.urlencode(args)
    NVSShost = "https://www.cv.nrao.edu/cgi-bin/postage.pl"
    # fetch
    try:
        request = urllib2.Request(url)
        response = urllib2.urlopen(request, encoded_args)
        data = response.read()
    except Exception, exception:
        print exception
        OErr.PLog(err, OErr.Error, "Request from server failed")
        OErr.printErrMsg(err)
Example #4
def WriteFGTable(outUV, katdata, meta, err):
    """
    Get the flags from the h5 file and convert to FG table format.
    UNUSED- This implimentation is too slow!
    outUV    = Obit UV object
    meta     =  dict with data meta data
    err      = Python Obit Error/message stack to init
    """
    ###############################################################

    # work out Start time in unix sec
    tm = katdata.timestamps[1:2]
    tx = time.gmtime(tm[0])
    time0 = tm[0] - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]
    int_time = katdata.dump_period

    #Loop through scans in h5 file
    row = 0
    for scan, state, target in katdata.scans():
        name = target.name.replace(' ', '_')
        if state != 'track':
            continue
        tm = katdata.timestamps[:]
        nint = len(tm)
        el = target.azel(tm[int(nint / 2)])[1] * 180. / math.pi
        if el < 15.0:
            continue
        row += 1
        flags = katdata.flags()[:]
        numflag = 0
        for t, chan_corr in enumerate(flags):
            for c, chan in enumerate(chan_corr):
                cpflagged = []
                for p, cp in enumerate(chan):
                    #for cpaverage in meta['pairLookup']:
                    flag = cp  #numpy.any(chan[meta['pairLookup'][cpaverage]])
                    product = meta['products'][p]
                    if product[0] == product[1]:
                        continue
                    if flag and (not product[0:2] in cpflagged):
                        cpflagged.append(product[0:2])
                        numflag += 1
                        starttime = float(
                            (tm[t] - time0 - (int_time / 2)) / 86400.0)
                        endtime = float(
                            (tm[t] - time0 + (int_time / 2)) / 86400.0)
                        UV.PFlag(outUV,err,timeRange=[starttime,endtime], flagVer=1, \
                                     Ants=[product[0],product[1]], \
                                     Chans=[c+1,c+1], IFs=[1,1], Stokes='1111', Reason='Online flag')
        numvis = t * c * (p / meta["nstokes"])
        msg = "Scan %4d %16s   Online flags: %7s of %8s vis" % (
            row, name, numflag, numvis)
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
Example #5
def ConvertKATData(outUV, katdata, meta, err):
    """
    Read KAT HDF data and write Obit UV

     * outUV    = Obit UV object
     * katdata  = input KAT dataset
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    reffreq = meta["spw"][0][1]  # reference frequency
    lamb = 2.99792458e8 / reffreq  # wavelength of reference freq
    nchan = meta["spw"][0][0]  # number of channels
    nif = len(meta["spw"])  # Number of IFs
    nstok = meta["nstokes"]  # Number of Stokes products
    p = meta["products"]  # baseline stokes indices
    nprod = len(p)  # number of correlations/baselines
    # work out Start time in unix sec
    tm = katdata.timestamps[1:2]
    tx = time.gmtime(tm[0])
    time0 = tm[0] - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]

    # Set data to read one vis per IO
    outUV.List.set("nVisPIO", 1)

    # Open data
    zz = outUV.Open(UV.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening output UV")
    # visibility record offsets
    d = outUV.Desc.Dict
    ilocu = d['ilocu']
    ilocv = d['ilocv']
    ilocw = d['ilocw']
    iloct = d['iloct']
    ilocb = d['ilocb']
    ilocsu = d['ilocsu']
    nrparm = d['nrparm']
    jlocc = d['jlocc']
    jlocs = d['jlocs']
    jlocf = d['jlocf']
    jlocif = d['jlocif']
    naxes = d['inaxes']
    count = 0.0
    visno = 0
    # Get IO buffers as numpy arrays
    shape = len(outUV.VisBuf) / 4
    buffer = numarray.array(sequence=outUV.VisBuf,
                            type=numarray.Float32,
                            shape=shape)

    # Template vis
    vis = outUV.ReadVis(err, firstVis=1)
    first = True
    firstVis = 1
    numflags = 0
    numvis = 0
    # Do we need to stop Fringes
    try:
        autodelay = [int(ad) for ad in katdata.sensor['DBE/auto-delay']]
        autodelay = all(autodelay)
    except:
        autodelay = False
    if not autodelay:
        msg = "W term in UVW coordinates will be used to stop the fringes."
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print msg
    for scan, state, target in katdata.scans():
        # Fetch data
        tm = katdata.timestamps[:]
        nint = len(tm)
        vs = katdata.vis[:]
        wt = katdata.weights()[:]
        fg = katdata.flags()[:]
        #Get target suid
        # Only on targets in the input list
        try:
            suid = meta["targLookup"][target.name[0:16]]
        except:
            continue
        # Negate the weights that are online flagged (ie. apply the online flags here)
        wt = numpy.where(fg, -wt, wt)
        numflags += numpy.sum(fg)
        numvis += fg.size
        uu = katdata.u
        vv = katdata.v
        ww = katdata.w
        # Number of integrations
        msg = "Scan:%4d Int: %4d %16s Start %s" % (
            scan, nint, target.name, day2dhms((tm[0] - time0) / 86400.0)[0:12])
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print msg
        # Loop over integrations
        for iint in range(0, nint):
            # loop over data products/baselines
            for iprod in range(0, nprod):
                thisvis = vs[iint:iint + 1, :, iprod:iprod + 1]
                thisw = ww[iint:iint + 1, iprod]
                # Fringe stop the data if necessary
                if not autodelay:
                    thisvis = StopFringes(thisvis[:, :,
                                                  0], katdata.channel_freqs,
                                          thisw, katdata.corr_products[iprod])
                # Copy slices
                indx = nrparm + (p[iprod][2]) * 3
                buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                       3] = thisvis.real.flatten()
                indx += 1
                buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                       3] = thisvis.imag.flatten()
                indx += 1
                buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                       3] = wt[iint:iint + 1, :, iprod:iprod + 1].flatten()
                # Write if Stokes index >= next or the last
                if (iprod == nprod - 1) or (p[iprod][2] >= p[iprod + 1][2]):
                    # Random parameters
                    buffer[ilocu] = uu[iint][iprod] / lamb
                    buffer[ilocv] = vv[iint][iprod] / lamb
                    buffer[ilocw] = ww[iint][iprod] / lamb
                    buffer[iloct] = (tm[iint] -
                                     time0) / 86400.0  # Time in days
                    buffer[ilocb] = p[iprod][0] * 256.0 + p[iprod][1]
                    buffer[ilocsu] = suid
                    outUV.Write(err, firstVis=visno)
                    visno += 1
                    buffer[3] = -3.14159
                    #print visno,buffer[0:5]
                    firstVis = None  # Only once
                    # initialize visibility
                    first = True
        # end loop over integrations
        if err.isErr:
            OErr.printErrMsg(err, "Error writing data")
    # end loop over scan
    if numvis > 0:
        msg = "Applied %s online flags to %s visibilities (%.3f%%)" % (
            numflags, numvis, float(numflags) / float(numvis))
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing data")
Example #6
def KAT2AIPS (katdata, outUV, disk, fitsdisk, err, \
              calInt=1.0, **kwargs):
    """Convert KAT-7 HDF 5 data set to an Obit UV.

    This module requires katdal and katpoint and their dependencies;
    contact Ludwig Schwardt <*****@*****.**> for details.

    Parameters
    ----------
    katdata : katdal dataset
        Input katfile object
    outUV : Obit UV
        Obit UV object, should be a KAT template for the
        appropriate number of IFs and poln.
    disk  : int
        AIPS Disk number
    fitsdisk : int
        FITS Disk number
    err : OErr
        Obit error/message stack
    calInt : float
        Calibration interval in min.
    targets : list, optional
        List of target names to extract from the file
    """
    ################################################################
    OErr.PLog(err, OErr.Info, "Converting h5 data to AIPS UV format.")
    OErr.printErr(err)
    print "Converting h5 data to AIPS UV format.\n"

    # Extract metadata
    meta = GetKATMeta(katdata, err)

    # Extract AIPS parameters of the uv data to the metadata
    meta["Aproject"] = outUV.Aname
    meta["Aclass"] = outUV.Aclass
    meta["Aseq"] = outUV.Aseq
    meta["Adisk"] = disk
    meta["calInt"] = calInt
    meta["fitsdisk"] = fitsdisk
    # Update descriptor
    UpdateDescriptor(outUV, meta, err)
    # Write AN table
    WriteANTable(outUV, meta, err)
    # Write FQ table
    WriteFQTable(outUV, meta, err)
    # Write SU table
    WriteSUTable(outUV, meta, err)

    # Convert data
    ConvertKATData(outUV, katdata, meta, err)

    # Index data
    OErr.PLog(err, OErr.Info, "Indexing data")
    OErr.printErr(err)
    UV.PUtilIndex(outUV, err)

    # Open/close UV to update header
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # initial CL table
    OErr.PLog(err, OErr.Info, "Create Initial CL table")
    OErr.printErr(err)
    print "Create Initial CL table\n"
    UV.PTableCLGetDummy(outUV, outUV, 1, err, solInt=calInt)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # History
    outHistory = History.History("outhistory", outUV.List, err)
    outHistory.Open(History.READWRITE, err)
    outHistory.TimeStamp("Convert KAT7 HDF 5 data to Obit", err)
    outHistory.WriteRec(-1, "datafile = " + katdata.name, err)
    outHistory.WriteRec(-1, "calInt   = " + str(calInt), err)
    outHistory.Close(err)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")
    # Return the metadata for the pipeline
    return meta
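
# A minimal usage sketch (not from the original source). Assumes `katdata` is
# an open katdal/katfile dataset, `outUV` is an Obit UV object built from a
# suitable KAT template, and `err` is an initialized OErr stack; the disk
# numbers are illustrative.
meta = KAT2AIPS(katdata, outUV, 1, 0, err, calInt=1.0)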
Example #7
def MKContPipeline(files, outputdir, **kwargs):
    """MeerKAT Continuum pipeline.

    Parameters
    ----------
    files : list
        h5 filenames (note: multiple h5 files, i.e. a ConcatenatedDataSet,
        are not currently supported)
    outputdir : string
        Directory location to write output data.
    scratchdir : string, optional
        The directory location of the aips disk
    parmFile : string, optional
        Overwrite the default imaging parameters using this parameter file.
    """
    #if len(files) > 1:
    #    raise TooManyKatfilesException('Processing multiple katfiles are not currently supported')
    # Only concatenate if we have to
    if len(files) == 1:
        h5file = files[0]
    else:
        h5file = files

    # Die gracefully if we cannot write to the output area...
    if not os.path.exists(outputdir):
        print('Specified output directory: ' + outputdir + ' does not exist.')
        exit(-1)

    # Obit error logging
    err = OErr.OErr()

    #################### Initialize filenames #######################################################
    fileRoot = os.path.join(outputdir,
                            os.path.basename(os.path.splitext(
                                files[0])[0]))  # root of file name
    logFile = fileRoot + ".log"  # Processing log file
    avgClass = ("UVAv")[0:6]  # Averaged data AIPS class
    manifestfile = outputdir + '/manifest.pickle'

    ############################# Initialize OBIT and AIPS ##########################################
    noScrat = []
    # Logging directly to logFile
    OErr.PInit(err, 2, logFile)
    EVLAAddOutFile(os.path.basename(logFile), 'project', 'Pipeline log file')
    if kwargs.get('reuse'):
        ObitSys = AIPSSetup.AIPSSetup(err,
                                      configfile=kwargs.get('configFile'),
                                      scratchdir=kwargs.get('scratchdir'),
                                      aipsdisk=kwargs.get('aipsdisk'),
                                      overwrite=False)
    else:
        ObitSys = AIPSSetup.AIPSSetup(err,
                                      configfile=kwargs.get('configFile'),
                                      scratchdir=kwargs.get('scratchdir'),
                                      aipsdisk=kwargs.get('aipsdisk'))

    # Get the set up AIPS environment.
    AIPS_ROOT = os.environ['AIPS_ROOT']
    AIPS_VERSION = os.environ['AIPS_VERSION']

    nThreads = 72
    user = OSystem.PGetAIPSuser()
    AIPS.userno = user
    disk = 1
    fitsdisk = 0
    nam = os.path.basename(os.path.splitext(files[0])[0])[0:10]
    cls = "Raw"
    seq = 1

    ############################# Initialise Parameters ##########################################
    ####### Initialize parameters dictionary #####
    parms = KATInitContParms()
    ####### User defined parameters ######
    if kwargs.get('parmFile'):
        print("parmFile", kwargs.get('parmFile'))
        exec(open(kwargs.get('parmFile')).read())
        EVLAAddOutFile(os.path.basename(kwargs.get('parmFile')), 'project',
                       'Pipeline input parameters')

    ############### Initialize katfile object, uvfits object and condition data #########################
    OK = False
    # Open the h5 file as a katfile object
    try:
        #open katfile and perform selection according to kwargs
        katdata = katfile.open(h5file)
        OK = True
    except Exception as exception:
        print(exception)
    if not OK:
        OErr.PSet(err)
        OErr.PLog(err, OErr.Fatal,
                  "Unable to read KAT HDF5 data in " + str(h5file))
        raise KATUnimageableError("Unable to read KAT HDF5 data in " +
                                  str(h5file))

    #Are we MeerKAT or KAT-7
    telescope = katdata.ants[0].name[0]
    if telescope == 'm':
        sefd = 500.
    else:
        sefd = 1200.
    #Get calibrator models
    fluxcals = katpoint.Catalogue(
        open(FITSDir.FITSdisks[0] + "/" + parms["fluxModel"]))
    #Condition data (get bpcals, update names for aips conventions etc)
    KATh5Condition(katdata, fluxcals, err)

    ###################### Data selection and static edits ############################################
    # Select data based on static imageable parameters
    MKATh5Select(katdata, parms, err, **kwargs)

    # General AIPS data parameters at script level
    dataClass = ("UVDa")[0:6]  # AIPS class of raw uv data
    band = katdata.spectral_windows[0].product  #Correlator product
    project = os.path.basename(os.path.splitext(files[0])[0])[
        0:10]  # Project name (12 char or less, used as AIPS Name)
    outIClass = parms["outIClass"]  # image AIPS class
    debug = parms["debug"]
    check = parms["check"]

    ####################### Import data into AIPS #####################################################
    # Reuse existing AIPS data?
    if kwargs.get('reuse'):
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), dataClass, disk,
                        seq, True, err)
        obsdata = KATH5toAIPS.GetKATMeta(katdata, err)
        # Extract AIPS parameters of the uv data to the metadata
        obsdata["Aproject"] = uv.Aname
        obsdata["Aclass"] = uv.Aclass
        obsdata["Aseq"] = uv.Aseq
        obsdata["Adisk"] = disk
        obsdata["calInt"] = katdata.dump_period
        obsdata["fitsdisk"] = fitsdisk
        # TODO: Check if the input data has been Hanned.
        doneHann = True
    else:
        # Number of baselines gives batch size
        nbl = len(
            np.unique([(cp[0][:-1] + cp[1][:-1]).upper()
                       for cp in katdata.corr_products]))
        # Construct a template uvfits file from master template
        mastertemplate = ObitTalkUtil.FITSDir.FITSdisks[
            fitsdisk] + 'MKATTemplate.uvtab.gz'
        outtemplate = nam + '.uvtemp'
        KATH5toAIPS.MakeTemplate(mastertemplate,
                                 outtemplate,
                                 len(katdata.channel_freqs),
                                 nvispio=nbl)
        uv = OTObit.uvlod(outtemplate, 0, EVLAAIPSName(project), cls, disk,
                          seq, err)
        obsdata = KATH5toAIPS.KAT2AIPS(katdata,
                                       uv,
                                       disk,
                                       fitsdisk,
                                       err,
                                       calInt=katdata.dump_period,
                                       **kwargs)
        MakeIFs.UVMakeIF(uv, 8, err)
        os.remove(outtemplate)
    # Print the uv data header to screen.
    uv.Header(err)
    ############################# Set Project Processing parameters ###################################
    # Parameters derived from obsdata and katdata
    MKATGetObsParms(obsdata, katdata, parms, logFile)

    ###### Initialise target parameters #####
    KATInitTargParms(katdata, parms, err)

    # Load the outputs pickle jar
    EVLAFetchOutFiles()

    OSystem.PAllowThreads(nThreads)  # Allow threads in Obit/Python
    retCode = 0

    maxgap = max(parms["CalAvgTime"], 20 * katdata.dump_period) / 60.
    ################### Start processing ###############################################################

    mess = "Start project "+parms["project"]+" AIPS user no. "+str(AIPS.userno)+\
           ", KAT7 configuration "+parms["KAT7Cfg"]
    printMess(mess, logFile)
    if debug:
        pydoc.ttypager = pydoc.plainpager  # don't page task input displays
        mess = "Using Debug mode "
        printMess(mess, logFile)
    if check:
        mess = "Only checking script"
        printMess(mess, logFile)

    # Log parameters
    printMess("Parameter settings", logFile)
    for p in parms:
        mess = "  " + p + ": " + str(parms[p])
        printMess(mess, logFile)
    clist = []
    for DCal in parms["DCals"]:
        if DCal["Source"] not in clist:
            clist.append(DCal["Source"])
    for PCal in parms["PCals"]:
        if PCal["Source"] not in clist:
            clist.append(PCal["Source"])
    for ACal in parms["ACals"]:
        if ACal["Source"] not in clist:
            clist.append(ACal["Source"])
    if kwargs.get('targets') is not None:
        targets = [
            targ.name for targ in katdata.catalogue
            if (targ.name not in clist) and (
                targ.name in kwargs.get('targets').split(','))
        ]
    else:
        targets = [
            targ.name for targ in katdata.catalogue if (targ.name not in clist)
        ]
    refAnt = FetchObject(fileRoot + ".refAnt.pickle")

    # Save parameters to pickle jar, manifest
    ParmsPicklefile = fileRoot + ".Parms.pickle"  # Where results saved
    SaveObject(parms, ParmsPicklefile, True)
    EVLAAddOutFile(os.path.basename(ParmsPicklefile), 'project',
                   'Processing parameters used')
    loadClass = dataClass

    # Hanning - No Hanning
    parms["doHann"] = False
    doneHann = False

    if doneHann:
        # Halve channels after hanning.
        parms["selChan"] = int(parms["selChan"] / 2)
        parms["BChDrop"] = int(parms["BChDrop"] / 2)
        parms["EChDrop"] = int(parms["EChDrop"] / 2)
        if uv is None and not check:
            raise RuntimeError("Cannot Hann data ")

    # Clear any old calibration/editing
    if parms["doClearTab"] or kwargs.get('reuse'):
        mess = "Clear previous calibration"
        printMess(mess, logFile)
        EVLAClearCal(uv,
                     err,
                     doGain=parms["doClearGain"],
                     doFlag=parms["doClearFlag"],
                     doBP=parms["doClearBP"],
                     check=check)
        OErr.printErrMsg(err, "Error resetting calibration")

        # Quack to remove data from start and end of each scan
    if parms["doQuack"]:
        retCode = EVLAQuack (uv, err, begDrop=parms["quackBegDrop"], endDrop=parms["quackEndDrop"], \
                             Reason=parms["quackReason"], \
                             logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Quacking data")

    # Flag antennas shadowed by others?
    if parms["doShad"]:
        retCode = EVLAShadow (uv, err, shadBl=parms["shadBl"], \
                              logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Shadow flagging data")

    # Median window time editing, for RFI impulsive in time
    if parms["doMednTD1"]:
        mess = "Median window time editing, for RFI impulsive in time:"
        printMess(mess, logFile)
        retCode = EVLAMedianFlag (uv, clist, err, noScrat=noScrat, nThreads=nThreads, \
                                  avgTime=parms["mednAvgTime"], avgFreq=parms["mednAvgFreq"],  chAvg= parms["mednChAvg"], \
                                  timeWind=parms["mednTimeWind"],flagVer=2, flagTab=2,flagSig=parms["mednSigma"], \
                                  logfile=logFile, check=check, debug=False)
        if retCode != 0:
            raise RuntimeError("Error in MednFlag")

    # Median window frequency editing, for RFI impulsive in frequency
    if parms["doFD1"]:
        mess = "Median window frequency editing, for RFI impulsive in frequency:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, clist, err, flagVer=2, flagTab=2, doCalib=-1, doBand=-1,   \
                                timeAvg=parms["FD1TimeAvg"], \
                                doFD=True, FDmaxAmp=1.0e20, FDmaxV=1.0e20, FDwidMW=parms["FD1widMW"],  \
                                FDmaxRMS=[1.0e20,0.1], FDmaxRes=parms["FD1maxRes"],  \
                                FDmaxResBL= parms["FD1maxRes"],  FDbaseSel=parms["FD1baseSel"],\
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in AutoFlag")

    # Parallactic angle correction?
    if parms["doPACor"]:
        retCode = EVLAPACor(uv, err, \
                                logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in Parallactic angle correction")

    # Need to find a reference antenna?  See if we have saved it?
    if (parms["refAnt"] <= 0):
        refAnt = FetchObject(fileRoot + ".refAnt.pickle")
        if refAnt:
            parms["refAnt"] = refAnt

    # Use bandpass calibrator and center half of each spectrum
    if parms["refAnt"] <= 0:
        mess = "Find best reference antenna: run Calib on BP Cal(s) "
        printMess(mess, logFile)
        parms["refAnt"] = EVLAGetRefAnt(uv, parms["BPCals"], err, flagVer=0, \
                                        solInt=parms["bpsolint1"], nThreads=nThreads, \
                                        logfile=logFile, check=check, debug=debug)
        if err.isErr:
            raise RuntimeError("Error finding reference antenna")
        if parms["refAnts"][0] <= 0:
            parms["refAnts"][0] = parms["refAnt"]
        mess = "Picked reference antenna " + str(parms["refAnt"])
        printMess(mess, logFile)
        # Save it
        ParmsPicklefile = fileRoot + ".Parms.pickle"  # Where results saved
        SaveObject(parms, ParmsPicklefile, True)
        refAntPicklefile = fileRoot + ".refAnt.pickle"  # Where results saved
        SaveObject(parms["refAnt"], refAntPicklefile, True)

    # Plot Raw, edited data?
    parms["doRawSpecPlot"] = False
    parms["doSpecPlot"] = False
    if parms["doRawSpecPlot"] and parms["plotSource"]:
        mess = "Raw Spectral plot for: " + ' '.join(parms["BPCal"])
        printMess(mess, logFile)
        plotFile = fileRoot + "_RawSpec.ps"
        retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                               Stokes=["RR","LL"], doband=-1,          \
                               check=check, debug=debug, logfile=logFile )
        if retCode != 0:
            raise RuntimeError("Error in Plotting spectrum")
        EVLAAddOutFile(plotFile, 'project', 'Raw spectral plot')

    # delay calibration
    if parms["doDelayCal"] and parms["DCals"] and not check:
        plotFile = fileRoot + "_DelayCal.ps"
        retCode = EVLADelayCal(uv, parms["DCals"], err,  \
                               BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                               doCalib=2, flagVer=0, doBand=-1, \
                               solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"],  \
                               refAnts=[parms["refAnt"]], doTwo=parms["doTwo"],
                               doZeroPhs=parms["delayZeroPhs"], \
                               doPlot=parms["doSNPlot"], plotFile=plotFile, \
                               nThreads=nThreads, noScrat=noScrat, \
                               logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in delay calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and parms["plotSource"]:
            plotFile = fileRoot + "_DelaySpec.ps"
            retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \
                                   plotFile, parms["refAnt"], err, \
                                   Stokes=["RR","LL"], doband=-1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode != 0:
                raise RuntimeError("Error in Plotting spectrum")
        print(parms["bpBChan1"], parms["bpEChan1"], parms["bpBChan2"],
              parms["bpEChan2"], parms["bpChWid2"])
    # Bandpass calibration
    if parms["doBPCal"] and parms["BPCals"]:
        retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                            solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                            BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                            BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                            doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                            UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \
                            nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in Bandpass calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and parms["plotSource"]:
            plotFile = fileRoot + "_BPSpec.ps"
            retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, \
                                   parms["refAnt"], err, Stokes=["RR","LL"], doband=2,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode != 0:
                raise RuntimeError("Error in Plotting spectrum")

    # Amp & phase Calibrate
    if parms["doAmpPhaseCal"]:
        plotFile = fileRoot + "_APCal.ps"
        retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"],
                             doCalib=2, doBand=2, BPVer=1, flagVer=0, \
                             BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                             solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                             doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \
                             ampEditFG=parms["ampEditFG"], \
                             doPlot=parms["doSNPlot"], plotFile=plotFile,  refAnt=parms["refAnt"], \
                             nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        #print parms["ACals"],parms["PCals"]
        if retCode != 0:
            raise RuntimeError("Error calibrating")

    # More editing

    if parms["doAutoFlag"]:
        mess = "Post calibration editing:"
        printMess(mess, logFile)
        # if going to redo then only calibrators
        if parms["doRecal"]:
            # Only calibrators
            clist = []
            for DCal in parms["DCals"]:
                if DCal["Source"] not in clist:
                    clist.append(DCal["Source"])
            for PCal in parms["PCals"]:
                if PCal["Source"] not in clist:
                    clist.append(PCal["Source"])
            for ACal in parms["ACals"]:
                if ACal["Source"] not in clist:
                    clist.append(ACal["Source"])
        else:
            clist = []

        retCode = EVLAAutoFlag (uv, clist, err, flagVer=0, flagTab =2, \
                                doCalib=2, gainUse=0, doBand=2, BPVer=1,  \
                                IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                doFD=parms["doFirstAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                FDmaxRes=parms["FDmaxRes"],  FDmaxResBL=parms["FDmaxResBL"], \
                                FDbaseSel=parms["FDbaseSel"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in AutoFlag")

    # Redo the calibration using new flagging?
    if parms["doBPCal2"] == None:
        parms["doBPCal2"] = parms["doBPCal"]
    if parms["doDelayCal2"] == None:
        parms["doDelayCal2"] = parms["doDelayCal2"]
    if parms["doAmpPhaseCal2"] == None:
        parms["doAmpPhaseCal2"] = parms["doAmpPhaseCal"]
    if parms["doAutoFlag2"] == None:
        parms["doAutoFlagCal2"] = parms["doAutoFlag"]
    if parms["doRecal"]:
        mess = "Redo calibration:"
        printMess(mess, logFile)
        EVLAClearCal(uv,
                     err,
                     doGain=True,
                     doFlag=False,
                     doBP=True,
                     check=check,
                     logfile=logFile)
        OErr.printErrMsg(err, "Error resetting calibration")
        # Parallactic angle correction?
        if parms["doPACor"]:
            retCode = EVLAPACor(uv, err, \
                                logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error in Parallactic angle correction")

        # Delay recalibration
        if parms["doDelayCal2"] and parms["DCals"] and not check:
            plotFile = fileRoot + "_DelayCal2.ps"
            retCode = EVLADelayCal(uv, parms["DCals"], err, \
                                   BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                                   doCalib=2, flagVer=0, doBand=-1, \
                                   solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"],  \
                                   refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], \
                                   doZeroPhs=parms["delayZeroPhs"], \
                                   doPlot=parms["doSNPlot"], plotFile=plotFile, \
                                   nThreads=nThreads, noScrat=noScrat, \
                                   logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error in delay calibration")

            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = fileRoot + "_DelaySpec2.ps"
                retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                                       Stokes=["RR","LL"], doband=-1,          \
                                       check=check, debug=debug, logfile=logFile )
                if retCode != 0:
                    raise RuntimeError("Error in Plotting spectrum")

        # Bandpass calibration
        if parms["doBPCal2"] and parms["BPCals"]:
            retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                            solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                            BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                            BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                            doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                            UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \
                            nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error in Bandpass calibration")

            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = fileRoot + "_BPSpec2.ps"
                retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                                   Stokes=["RR","LL"], doband=2,          \
                                   check=check, debug=debug, logfile=logFile )
                if retCode != 0:
                    raise RuntimeError("Error in Plotting spectrum")

        # Amp & phase Recalibrate
        if parms["doAmpPhaseCal2"]:
            plotFile = fileRoot + "_APCal2.ps"
            retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], \
                                 doCalib=2, doBand=2, BPVer=1, flagVer=0, \
                                 BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                                 solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                                 doAmpEdit=True, ampSigma=parms["ampSigma"], \
                                 ampEditFG=parms["ampEditFG"], \
                                 doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \
                                 noScrat=noScrat, nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error calibrating")

        # More editing
        parms["doAutoFlag2"] = False
        if parms["doAutoFlag2"]:
            mess = "Post recalibration editing:"
            printMess(mess, logFile)
            retCode = EVLAAutoFlag (uv, [], err, flagVer=0, flagTab=2, \
                                    doCalib=2, gainUse=0, doBand=2, BPVer=1,  \
                                    IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                    doFD=parms["doSecAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                    FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                    FDmaxRes=parms["FDmaxRes"],  FDmaxResBL= parms["FDmaxResBL"], \
                                    FDbaseSel=parms["FDbaseSel"], \
                                    nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error in AutoFlag")
    # end recal
    # Calibrate and average data
    # Overwrite avgStokes from command line
    parms["avgFreq"] = 0
    parms["chAvg"] = 1
    parms["doCalAvg"] = 'Splat'
    if kwargs.get('halfstokes'):
        parms["avgStokes"] = 'HALF'
    if parms["doCalAvg"] == 'Splat':
        retCode = KATCalAvg (uv, avgClass, parms["seq"], parms["CalAvgTime"], err, \
                              flagVer=2, doCalib=2, gainUse=0, doBand=2, BPVer=1, doPol=False, \
                              avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], Stokes=parms["avgStokes"], \
                              BChan=1, EChan=parms["selChan"] - 1, doAuto=parms["doAuto"], \
                              BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
                              nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in CalAvg")
    elif parms["doCalAvg"] == 'BL':
        retCode = KATBLCalAvg (uv, avgClass, parms["seq"], err, \
                              flagVer=2, doCalib=2, gainUse=0, doBand=2, BPVer=1, doPol=False, \
                              avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], FOV=parms['FOV'], \
                              maxInt=min(parms["solPInt"],parms["solAInt"]), Stokes=parms["avgStokes"], \
                              BChan=1, EChan=parms["selChan"] - 1, timeAvg=parms["CalAvgTime"], \
                              BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
                              logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in BLCalAvg")

    if parms["doSaveTab"]:
        filename = project + ".CalTab.uvtab"
        _ = EVLAUVFITSTab(uv, filename, 0, err, logfile=logFile)

    #Zap unaveraged data if requested
    if kwargs.get('zapraw'):
        uv.Zap(err)

    # Get calibrated/averaged data
    if not check:
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), avgClass[0:6], \
                        disk, parms["seq"], True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating cal/avg AIPS data")

    plotFile = fileRoot + "_Spec.ps"
    retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \
                               plotFile, parms["refAnt"], err, \
                               Stokes=["I"], doband=-1, docalib=-1,      \
                               check=check, debug=debug, logfile=logFile )
    if retCode != 0:
        raise RuntimeError("Error in Plotting spectrum")

    # KATUVFITS(uv, 'preimage.uvfits', 0, err, exclude=["AIPS HI", "AIPS SL", "AIPS PL"],
    # include=["AIPS AN", "AIPS FQ"], compress=parms["Compress"], logfile=logFile)
    KATUVFITab(uv, project + '.uvtab', 0, err)
    #Gzip the data?
    if kwargs.get('gzip'):
        os.system('pigz -p %d %s' % (nThreads, project + '.uvtab'))
        os.system('rm -f %s' % (project + '.uvtab'))
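
# A minimal usage sketch (not from the original source). The file name, output
# directory and keyword values are illustrative placeholders only.
MKContPipeline(['/data/1548939342_sdp_l0.h5'], '/data/reduction/',
               parmFile='mkcont_parms.py', targets='PKS1934-638', gzip=True)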
Example #8
def SelectCC(im, inCC, outCC, radius, peelPos, err):
    """
    Select/copy CCs more than radius from  peelPos
    
    This generates a CC table which can be subtracted from the uv data
    and remove all sources but the peel source area.
    * im     = Python Image with CC Tables
    * inCC   = input CC version
    * outCC  = output CC version
    * radius = radius (deg) of zone of exclusion
    * peelPos= [RA, Dec] in deg.
    * err    = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not Image.PIsA(im):
        raise TypeError("im MUST be a Python Obit Image")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    # Geometry
    xref = im.Desc.Dict['crval'][0]
    yref = im.Desc.Dict['crval'][1]
    xrefpix = im.Desc.Dict['crpix'][0]
    yrefpix = im.Desc.Dict['crpix'][1]
    xinc = abs(im.Desc.Dict['cdelt'][0])
    yinc = im.Desc.Dict['cdelt'][1]
    rot = im.Desc.Dict['crota'][1]
    imtype = im.Desc.Dict['ctype'][0][4:]
    # Input CC
    inTab = im.NewTable(Table.READONLY, "AIPS CC", inCC, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error finding input CC Table")
        return
    # Output CC
    nrow = inTab.Desc.Dict['nrow']
    noParms = inTab.Desc.List.Dict['NO_PARMS'][2][0]
    outTab = im.NewTable(Table.WRITEONLY, "AIPS CC", outCC, err, \
                             noParms = noParms)
    if err.isErr:
        OErr.printErrMsg(err, "Error creating output CC Table")
        return
    # Open
    inTab.Open(Table.READONLY, err)
    outTab.Open(Table.WRITEONLY, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening CC Tables")
        return
    orow = 1
    count = 0
    sumf = 0.0
    OErr.PLog(err, OErr.Info, "Excluding:")
    for irow in range(1, nrow + 1):
        row = inTab.ReadRow(irow, err)
        # Want this one?
        dx = row['DELTAX'][0]
        dy = row['DELTAY'][0]
        [ierr, xpos, ypos] = SkyGeom.PWorldPosLM(dx, dy, xref, yref, xinc,
                                                 yinc, rot, imtype)
        # Small angle approximation: scale the RA offset by cos(Dec)
        dra = (xpos - peelPos[0]) * cos(radians(ypos))
        delta = (dra**2 + (ypos - peelPos[1])**2)**0.5
        if delta > radius:
            outTab.WriteRow(orow, row, err)
            orow += 1
        else:
            #print irow,xpos,ypos
            count += 1
            sumf += row['FLUX'][0]
            ras = ImageDesc.PRA2HMS(xpos)
            decs = ImageDesc.PDec2DMS(ypos)
            OErr.PLog(err, OErr.Info,
                      "%6d %s %s flux= %f" % (irow, ras, decs, row['FLUX'][0]))
    # End loop
    OErr.PLog(err, OErr.Info, "Drop %6d CCs, sum flux= %f" % (count, sumf))
    OErr.printErr(err)
    inTab.Close(err)
    outTab.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error copying CC Table")
        return
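
# A minimal usage sketch (not from the original source). Assumes `im` is a
# Python Obit Image holding CC table version 1 and `err` is an initialized
# OErr stack; the peel position and exclusion radius are illustrative.
peelPos = [187.70593, 12.39112]  # [RA, Dec] of the peel source in deg
SelectCC(im, 1, 2, 0.1, peelPos, err)  # copy CCs > 0.1 deg from peelPos into CC ver 2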
Example #9
def SubPeel(uv, source, imp, uvp, err, addBack=False, seq=999, \
                flagVer=0, nThreads=1, doGPU=False, noScrat=[0,0,0], taskLog='', debug=False):
    """
    Subtract Peel model w/ solutions, then optionally add back w/o corruptions
    
    UV data should have calibration tables from self calibration
    Output data will be on the same disk as the input, seq=seq, class='PelSub' and
    with name = source (up to 12 char).
    Returns Peel source subtracted/replaced data
    * uv        Dataset with cal tables
                Needs at least the self cal gain table
    * source    source name
    * imp       Peel source model (CC table from ImagePeel)
    * uvp       UV data the result of peel (ImagePeel)
    * err       Python Obit Error/message stack
    * seq       Sequence number for output
    * addBack   Add model back to data w/o corruptions? Not recommended.
    * flagVer   FG table to apply, -1=> no flag
    * nThreads  number of threads to use
    * doGPU     Use GPU if available?
    * noScrat   AIPS disks not to use for scratch
    * taskLog   Log file
    * debug     If True leave debug Input file in /tmp
    """
    ################################################################
    # Checks
    if not UV.PIsA(uv):
        raise TypeError("uv MUST be a Python Obit UV data")
    if not Image.PIsA(imp):
        raise TypeError("imp MUST be a Python Obit Image")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    # Split main data set
    OErr.PLog(err, OErr.Info, "Copy data")
    OErr.printErr(err)
    split = ObitTask('Split')
    setname(uv, split)
    split.outDisk = split.inDisk
    split.outSeq = seq
    split.outClass = 'UPeel'
    split.Sources[0] = source
    split.flagVer = flagVer
    if uv.GetHighVer('AIPS SN') > 0:
        split.doCalib = 2
        split.gainUse = 0
        split.doCalWt = True
    else:
        split.doCalib = -1
    split.taskLog = taskLog
    split.debug = debug
    split.g
    outClass = split.outClass
    outDisk = split.outDisk
    outSeq = split.outSeq

    # Get data
    OErr.PLog(err, OErr.Info, "Make Peel model with corruptions")
    OErr.printErr(err)
    if UV.AExist(source[0:12], outClass, outDisk, outSeq, err):
        datauv = UV.newPAUV('data', source[0:12], outClass, outDisk, outSeq,
                            True, err)
    else:
        datauv = UV.newPAUV('data', source[0:8], outClass, outDisk, outSeq,
                            True, err)
    # Make data set with the model peel source with peel cal applied
    uvsub = ObitTask('UVSub')
    setname(uv, uvsub)
    uvsub.outName = source[0:12]
    uvsub.outDisk = uvsub.inDisk
    uvsub.outSeq = 1
    uvsub.outClass = 'Model'
    uvsub.Sources[0] = source
    uvsub.flagVer = flagVer
    uvsub.doCalib = -1
    uvsub.gainUse = 0
    set2name(imp, uvsub)
    uvsub.CCVer = 1
    uvsub.nfield = 1
    uvsub.Cmethod = 'DFT'
    uvsub.Opcode = 'MODL'
    uvsub.PBCor = False
    uvsub.noScrat = noScrat
    uvsub.noNeg = False
    uvsub.taskLog = taskLog
    uvsub.nThreads = nThreads
    uvsub.doGPU = doGPU
    uvsub.debug = debug
    uvsub.g

    # Get model data
    modeluv = UV.newPAUV('model', uvsub.outName, uvsub.outClass, uvsub.outDisk,
                         uvsub.outSeq, True, err)
    # Copy/invert/unblank SN table from peeluv
    hiPeelSN = uvp.GetHighVer('AIPS SN')
    inTab = max(1, hiPeelSN)
    sntab = uvp.NewTable(Table.READONLY, 'AIPS SN', inTab, err)
    z = UVSelfCal.PInvertSN(sntab, modeluv, 1, True, err)
    # Apply calibration table in subtract
    modeluv.List.set('doCalSelect', True)
    modeluv.List.set('doCalib', 2)
    modeluv.List.set('gainUse', 1)
    modeluv.List.set('passAll', True)

    # Subtract model from main data
    OErr.PLog(err, OErr.Info, "Subtract Corrupted Peel model from uv data")
    UV.PUtilVisSub(datauv, modeluv, datauv, err)
    OErr.printErr(err)

    # Add model without corrupting calibration
    if addBack:
        OErr.PLog(err, OErr.Info, "Add Peel model without corruptions")
        uvsub = ObitTask('UVSub')
        setname(datauv, uvsub)
        setoname(datauv, uvsub)
        uvsub.outSeq = uvsub.inSeq + 1
        uvsub.Sources[0] = source
        uvsub.flagVer = flagVer
        uvsub.doCalib = -1
        uvsub.gainUse = 0
        set2name(imp, uvsub)
        uvsub.CCVer = 1
        uvsub.nfield = 1
        uvsub.Cmethod = 'DFT'
        uvsub.Factor = -1.
        uvsub.PBCor = False
        uvsub.noScrat = noScrat
        uvsub.noNeg = False
        uvsub.taskLog = taskLog
        uvsub.nThreads = nThreads
        uvsub.doGPU = doGPU
        uvsub.debug = debug
        uvsub.g
        outClass = uvsub.outClass
        outDisk = uvsub.outDisk
        outSeq = uvsub.outSeq
        # end add back
    OErr.printErr(err)
    # Delete model dataset
    if not debug:
        modeluv.Zap(err)

    # Final data
    if UV.AExist(source[0:12], outClass, outDisk, outSeq, err):
        datauv2 = UV.newPAUV('data', source[0:12], outClass, outDisk, outSeq,
                             True, err)
    else:
        datauv2 = UV.newPAUV('data', source[0:8], outClass, outDisk, outSeq,
                             True, err)
    return datauv2
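
# A minimal usage sketch (not from the original source). Assumes `uv` carries
# the self-calibration SN tables and `peelimg`/`peeluv` are the image and UV
# outputs of an ImagePeel run on the named source; the source name and tuning
# values are illustrative.
peeled_uv = SubPeel(uv, 'PKS1934-638', peelimg, peeluv, err,
                    addBack=False, seq=999, flagVer=0, nThreads=4,
                    taskLog='SubPeel.log')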
Example #10
def ConvertKATData(outUV, katdata, meta, err, stop_w=False, timeav=1):
    """
    Read KAT HDF data and write Obit UV

     * outUV    = Obit UV object
     * katdata  = input KAT dataset
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    reffreq = meta["spw"][0][1]  # reference frequency
    lamb = 2.99792458e8 / reffreq  # wavelength of reference freq
    nchan = meta["spw"][0][0]  # number of channels
    nif = len(meta["spw"])  # Number of IFs
    nstok = meta["nstokes"]  # Number of Stokes products
    p = meta["products"]  # baseline stokes indices
    nprod = len(p)  # number of correlations/baselines
    ants = meta["ants"]
    antslookup = {}
    for ant in katdata.ants:
        antslookup[ant.name] = ant
    # work out Start time in unix sec
    tm = katdata.timestamps[1:2]
    tx = time.gmtime(tm[0])
    time0 = tm[0] - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]

    # Set data to read one vis per IO
    outUV.List.set("nVisPIO", 1)

    # Open data
    zz = outUV.Open(UV.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening output UV")
    # visibility record offsets
    d = outUV.Desc.Dict
    ilocu = d['ilocu']
    ilocv = d['ilocv']
    ilocw = d['ilocw']
    iloct = d['iloct']
    ilocb = d['ilocb']
    ilocsu = d['ilocsu']
    nrparm = d['nrparm']
    jlocc = d['jlocc']
    jlocs = d['jlocs']
    jlocf = d['jlocf']
    jlocif = d['jlocif']
    naxes = d['inaxes']
    count = 0.0
    visno = 0
    # Get IO buffers as numpy arrays
    shape = len(outUV.VisBuf) // 4
    buffer = numpy.frombuffer(outUV.VisBuf, dtype=numpy.float32, count=shape)

    # Template vis
    vis = outUV.ReadVis(err, firstVis=1)
    first = True
    firstVis = 1
    numflags = 0
    numvis = 0
    # Do we need to stop Fringes
    if stop_w:
        msg = "W term in UVW coordinates will be used to stop the fringes."
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print msg
    for scan, state, target in katdata.scans():
        # Fetch data - may blow core
        try:
            tm = katdata.timestamps
            vs = katdata.vis
            wt = katdata.weights
            #fg = katdata.flags
            fg = numpy.zeros_like(wt, dtype=bool)
        except:
            # Damn, save what you've got
            msg = "Blew core - try to recover"
            OErr.PLog(err, OErr.Info, msg)
            OErr.printErr(err)
            print msg
            outUV.Close(err)
            zz = outUV.Open(UV.READWRITE, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error closing/reopening data")
                return
            continue
        if timeav > 1:
            vs, wt, fg, tm = AverageTime(vs, wt, fg, tm, int(timeav))
            #Lets average the data!!!
            #vs,wt,fg,tm,cf=averager.average_visibilities(vs,wt,fg,tm,katdata.channel_freqs,timeav=int(timeav),chanav=1,flagav=True)
        nint = len(tm)
        #Get target suid
        # Only on targets in the input list
        try:
            suid = meta["targLookup"][target.name[0:16]]
        except:
            continue
        # Negate the weights that are online flagged (ie. apply the online flags here)
        #wt[numpy.where(fg)]=-32767.
        numflags += numpy.sum(fg)
        numvis += fg.size
        uu = numpy.zeros((
            len(tm),
            katdata.shape[2],
        ), dtype=numpy.float64)
        vv = numpy.zeros_like(uu)
        ww = numpy.zeros_like(uu)
        for num, corr_prod in enumerate(katdata.corr_products):
            uvw_coordinates = numpy.array(
                target.uvw(antslookup[corr_prod[0][:4]],
                           timestamp=tm,
                           antenna=antslookup[corr_prod[1][:4]]))
            uu[:, num] = uvw_coordinates[0]
            vv[:, num] = uvw_coordinates[1]
            ww[:, num] = uvw_coordinates[2]
        # Number of integrations
        msg = "Scan:%4d Int: %4d %16s Start %s" % (
            scan, nint, target.name, day2dhms((tm[0] - time0) / 86400.0)[0:12])
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print msg
        # Loop over integrations
        for iint in range(0, nint):
            vsdump = vs[iint:iint + 1]
            wtdump = wt[iint:iint + 1]
            #loop over baselines
            for ibase in itertools.combinations_with_replacement(ants, 2):
                #loop over polarisations
                for istok in range(0, nstok):
                    icorrprod = (ibase[0][0], ibase[1][0], istok)
                    iprod = p.index(icorrprod)
                    thisvis = vsdump[0, :, iprod:iprod + 1]
                    thiswt = wtdump[0, :, iprod:iprod + 1]
                    #thisw=ww[iint:iint+1,iprod]
                    # Fringe stop the data if necessary
                    #if stop_w:
                    #    thisvis=StopFringes(thisvis[:,:,0],katdata.channel_freqs,thisw,katdata.corr_products[iprod])
                    # Copy slices
                    indx = nrparm + (p[iprod][2]) * 3
                    buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                           3] = thisvis.real.flatten()
                    indx += 1
                    buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                           3] = thisvis.imag.flatten()
                    indx += 1
                    buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                           3] = thiswt.flatten()
                    # Write if Stokes index >= next or the last
                    #if (iprod==nprod-1) or (p[iprod][2]>=p[iprod+1][2]):
                # Random parameters
                buffer[ilocu] = uu[iint][iprod] / lamb
                buffer[ilocv] = vv[iint][iprod] / lamb
                buffer[ilocw] = ww[iint][iprod] / lamb
                buffer[iloct] = (tm[iint] - time0) / 86400.0  # Time in days
                buffer[ilocb] = p[iprod][0] * 256.0 + p[iprod][1]
                buffer[ilocsu] = suid
                outUV.Write(err, firstVis=visno)
                visno += 1
                buffer[3] = -3.14159
                #print visno,buffer[0:5]
                firstVis = None  # Only once
                # initialize visibility
                first = True
        # end loop over integrations
        if err.isErr:
            OErr.printErrMsg(err, "Error writing data")
    # end loop over scan
    if numvis > 0:
        msg = "Applied %s online flags to %s visibilities (%.3f%%)" % (
            numflags, numvis, (float(numflags) / float(numvis) * 100.))
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing data")
Ejemplo n.º 11
0
def newPAImage(name, Aname, Aclass, disk, seq, exists, err, verbose=False):
    """
    Create and initialize an AIPS based Image structure
    
    Create, set initial access information (full image, plane at a time)
    and if exists verifies the file.
    Returns the Python Image object
    isOK member set to indicate success

    * name     = name desired for object (labeling purposes)
    * Aname    = AIPS name of file
    * Aclass   = AIPS class of file
    * seq      = AIPS sequence number of file
    * disk     = AIPS disk number
    * exists   = if true then the file is opened and closed to verify
    * err      = Python Obit Error/message stack
    * verbose  = If true give any error messages, else suppress
    """
    ################################################################
    out = ImageMF(name)
    out.isOK = True  # until proven otherwise
    cno = -1
    user = OSystem.PGetAIPSuser()
    # print "disk, aseq", disk, seq
    # Does it really previously exist?
    test = AIPSDir.PTestCNO(disk, user, Aname, Aclass, "MA", seq, err)
    out.exist = test > 0
    if exists:  # If user thinks file exists...
        if out.exist:  # If file is defined in catalog -> verify that file exists
            OErr.PLog(err, OErr.Info, Aname + " image found. Now verifying...")
            if verbose: OErr.printErr(err)
            cno = AIPSDir.PFindCNO(disk, user, Aname, Aclass, "MA", seq, err)
            Obit.ImageMFSetAIPS(out.me, 2, disk, cno, user, blc, trc, err.me)
            Obit.ImagefullInstantiate(out.cast("ObitImage"), 1, err.me)
            #print "found",Aname,Aclass,"as",cno
        else:  # If file not defined in catalog -> error
            OErr.PLog(err, OErr.Error, Aname + " image does not exist")
            out.isOK = False
    else:  # exists=False
        # Create new image entry in catalog; if image already defined, this
        # has no effect
        OErr.PLog(err, OErr.Info,
                  "Creating new image: " + Aname + ", " + Aclass)
        if verbose: OErr.printErr(err)
        cno = AIPSDir.PAlloc(disk, user, Aname, Aclass, "MA", seq, err)
        Obit.ImageMFSetAIPS(out.me, 2, disk, cno, user, blc, trc, err.me)
        #print "assigned",Aname,Aclass,"to",cno

    # show any errors if wanted
    if verbose and err.isErr:
        out.isOK = False
        OErr.printErrMsg(err, "Error creating AIPS Image object")
    elif err.isErr:
        out.isOK = False
        OErr.PClear(err)  # Clear unwanted messages
    else:
        OErr.PClear(err)  # Clear non-error messages

    # Did it work?
    if not out.isOK:
        return out

    # Add File info
    out.FileType = 'AIPS'
    out.Disk = disk
    out.Aname = Aname
    out.Aclass = Aclass
    out.Aseq = seq
    out.Otype = "Image"
    out.Acno = cno
    return out  # seems OK
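
A hypothetical usage sketch of newPAImage follows; the AIPS name, class, disk and sequence are illustrative only and assume an Obit/AIPS system has already been initialised.

err = OErr.OErr()  # assumes Obit and the AIPS environment were started elsewhere
img = newPAImage("myimg", "MYIMG", "IClean", 1, 1, True, err, verbose=True)
if img.isOK:
    print("Found AIPS image in catalogue slot", img.Acno)
else:
    print("Image MYIMG.IClean not found")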
Ejemplo n.º 12
0
def MKContPipeline(files, outputdir, **kwargs):
    """MeerKAT Continuum pipeline.

    Parameters
    ----------
    files : list
        h5 filenames (note: multiple h5 files, i.e. a ConcatenatedDataSet,
        are not currently supported)
    outputdir : string
        Directory location to write output data
    scratchdir : string, optional
        The directory location of the aips disk
    parmFile : string, optional
        Overwrite the default imaging parameters using this parameter file.
    """
    #if len(files) > 1:
    #    raise TooManyKatfilesException('Processing multiple katfiles are not currently supported')
    h5file = files

    # Die gracefully if we cannot write to the output area...
    if not os.path.exists(outputdir):
        print('Specified output directory: ' + outputdir + ' does not exist.')
        exit(-1)

    # Obit error logging
    err = OErr.OErr()

    #################### Initialize filenames #######################################################
    fileRoot      = os.path.join(outputdir, os.path.basename(os.path.splitext(files[0])[0])) # root of file name
    logFile       = fileRoot+".log"   # Processing log file
    avgClass      = ("UVAv")[0:6]  # Averaged data AIPS class
    manifestfile  = outputdir + '/manifest.pickle'

    ############################# Initialize OBIT and AIPS ##########################################
    noScrat     = []
    # Logging directly to logFile
    OErr.PInit(err, 2, logFile)
    EVLAAddOutFile(os.path.basename(logFile), 'project', 'Pipeline log file')

    ObitSys = AIPSSetup.AIPSSetup(err,configfile=kwargs.get('configFile'),scratchdir=kwargs.get('scratchdir'))

    # Get the set up AIPS environment.
    AIPS_ROOT    = os.environ['AIPS_ROOT']
    AIPS_VERSION = os.environ['AIPS_VERSION']

    nThreads = 24
    user = OSystem.PGetAIPSuser()
    AIPS.userno = user
    disk = 1
    fitsdisk = 0
    nam = os.path.basename(os.path.splitext(files[0])[0])[0:10]
    cls = "Raw"
    seq = 1

    ############################# Initialise Parameters ##########################################
    ####### Initialize parameters dictionary ##### 
    parms = KATInitContParms()
    ####### User defined parameters ######
    if kwargs.get('parmFile'):
        print("parmFile",kwargs.get('parmFile'))
        exec(open(kwargs.get('parmFile')).read())
        EVLAAddOutFile(os.path.basename(kwargs.get('parmFile')), 'project', 'Pipeline input parameters' )

    ############### Initialize katfile object, uvfits object and condition data #########################
    OK = False
    # Open the h5 file as a katfile object
    try:
        #open katfile and perform selection according to kwargs
        katdata = katfile.open(h5file)
        OK = True
    except Exception as exception:
        print(exception)
    if not OK:
        OErr.PSet(err)
        OErr.PLog(err, OErr.Fatal, "Unable to read KAT HDF5 data in " + str(h5file))
        raise KATUnimageableError("Unable to read KAT HDF5 data in " + str(h5file))

    # We have a katdal object - read some external flags and add them in if available
    if kwargs.get('flags') is not None:
        flags = kwargs.get('flags')
        fa = flags.split(',')
        for fn, ff in enumerate(fa):
            ex_flags_file = h5py.File(ff)
            ex_flags = da.from_array(ex_flags_file['flags'], chunks=(1, 342, katdata.shape[2]))
            # Sum the new flags
            katdata.datasets[fn].source.data.flags = ex_flags

    #Are we MeerKAT or KAT-7
    telescope = katdata.ants[0].name[0]
    if telescope=='m':
        sefd=500.
    else:
        sefd=1200.
    #Get calibrator models
    fluxcals = katpoint.Catalogue(open(FITSDir.FITSdisks[0]+"/"+parms["fluxModel"]))
    #Condition data (get bpcals, update names for aips conventions etc)
    KATh5Condition(katdata,fluxcals,err)

    ###################### Data selection and static edits ############################################
    # Select data based on static imageable parameters
    MKATh5Select(katdata, parms, err, **kwargs)

    ####################### Import data into AIPS #####################################################
    # Construct a template uvfits file from master template
    mastertemplate=ObitTalkUtil.FITSDir.FITSdisks[fitsdisk]+'MKATTemplate.uvtab.gz'
    outtemplate=nam+'.uvtemp'
    KATH5toAIPS.MakeTemplate(mastertemplate,outtemplate,len(katdata.channel_freqs))
    uv=OTObit.uvlod(outtemplate,0,nam,cls,disk,seq,err)

    obsdata = KATH5toAIPS.KAT2AIPS(katdata, uv, disk, fitsdisk, err, calInt=1.0, **kwargs)
    MakeIFs.UVMakeIF(uv,8,err)

    # Print the uv data header to screen.
    uv.Header(err)
    os.remove(outtemplate)
    ############################# Set Project Processing parameters ###################################
    # Parameters derived from obsdata and katdata
    MKATGetObsParms(obsdata, katdata, parms, logFile)

    ###### Initialise target parameters #####
    KATInitTargParms(katdata,parms,err)

    # General AIPS data parameters at script level
    dataClass = ("UVDa")[0:6]      # AIPS class of raw uv data
    band      = katdata.spectral_windows[0].product #Correlator product
    project   = parms["project"][0:12]  # Project name (12 char or less, used as AIPS Name)
    outIClass = parms["outIClass"] # image AIPS class
    debug     = parms["debug"]
    check     = parms["check"]

    # Load the outputs pickle jar
    EVLAFetchOutFiles()

    OSystem.PAllowThreads(nThreads)   # Allow threads in Obit/python
    retCode = 0

    maxgap = max(parms["CalAvgTime"],20*katdata.dump_period)/60.
    ################### Start processing ###############################################################

    mess = "Start project "+parms["project"]+" AIPS user no. "+str(AIPS.userno)+\
           ", KAT7 configuration "+parms["KAT7Cfg"]
    printMess(mess, logFile)
    if debug:
        pydoc.ttypager = pydoc.plainpager # don't page task input displays
        mess = "Using Debug mode "
        printMess(mess, logFile)
    if check:
        mess = "Only checking script"
        printMess(mess, logFile)

    # Log parameters
    printMess("Parameter settings", logFile)
    for p in parms:
        mess = "  "+p+": "+str(parms[p])
        printMess(mess, logFile)
    clist = []
    for DCal in parms["DCals"]:
        if DCal["Source"] not in clist:
            clist.append(DCal["Source"])
    for PCal in parms["PCals"]:
        if PCal["Source"] not in clist:
            clist.append(PCal["Source"])
    for ACal in parms["ACals"]:
        if ACal["Source"] not in clist:
            clist.append(ACal["Source"])
    if kwargs.get('targets') is not None:
        targets = [targ.name for targ in katdata.catalogue if (targ.name not in clist) and (targ.name in kwargs.get('targets').split(','))]
    else:
        targets = [targ.name for targ in katdata.catalogue if (targ.name not in clist)]
    refAnt = FetchObject(fileRoot+".refAnt.pickle")

    # Save parameters to pickle jar, manifest
    ParmsPicklefile = fileRoot+".Parms.pickle"   # Where results saved
    SaveObject(parms, ParmsPicklefile, True)
    EVLAAddOutFile(os.path.basename(ParmsPicklefile), 'project', 'Processing parameters used' )
    loadClass = dataClass

    retCode = KATCalAvg (uv, "PREAVG", parms["seq"], parms["CalAvgTime"], err, \
                              flagVer=-1, doCalib=-1, gainUse=-1, doBand=-1, BPVer=-1, doPol=False, \
                              avgFreq=0, chAvg=1, BChan=1, EChan=0, doAuto=parms["doAuto"], Stokes=' ',\
                              BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
                              nThreads=nThreads, logfile=logFile, check=check, debug=debug)
    if retCode!=0:
        raise  RuntimeError("Error in CalAvg")
    uv.Zap(err)
    # Get initially averaged data
    if not check:
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), "PREAVG", \
                        disk, parms["seq"], True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating initial avg AIPS data")

    # Hanning
    parms["doHann"]=True
    if parms["doHann"]:
       # Set uv if not done
        if uv==None and not check:
            uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), loadClass[0:6], disk, parms["seq"], True, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating AIPS data")

        uv = KATHann(uv, EVLAAIPSName(project), dataClass, disk, parms["seq"], err, \
                      doDescm=parms["doDescm"], flagVer=0, logfile=logFile, check=check, debug=debug)
        #Halve channels after hanning.
        parms["selChan"]=int(parms["selChan"]/2)
        parms["BChDrop"]=int(parms["BChDrop"]/2)
        parms["EChDrop"]=int(parms["EChDrop"]/2)
        if uv==None and not check:
            raise RuntimeError("Cannot Hann data ")
 
    # Clear any old calibration/editing 
    if parms["doClearTab"]:
        mess =  "Clear previous calibration"
        printMess(mess, logFile)
        EVLAClearCal(uv, err, doGain=parms["doClearGain"], doFlag=parms["doClearFlag"], doBP=parms["doClearBP"], check=check)
        OErr.printErrMsg(err, "Error resetting calibration")

    # Quack to remove data from start and end of each scan
    if parms["doQuack"]:
        retCode = EVLAQuack (uv, err, begDrop=parms["quackBegDrop"], endDrop=parms["quackEndDrop"], \
                             Reason=parms["quackReason"], \
                             logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error Quacking data")
    
    # Flag antennas shadowed by others?
    if parms["doShad"]:
        retCode = EVLAShadow (uv, err, shadBl=parms["shadBl"], \
                              logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error Shadow flagging data")
    

    # Median window time editing, for RFI impulsive in time
    if parms["doMednTD1"]:
        mess =  "Median window time editing, for RFI impulsive in time:"
        printMess(mess, logFile)
        retCode = EVLAMedianFlag (uv, clist, err, noScrat=noScrat, nThreads=nThreads, \
                                  avgTime=parms["mednAvgTime"], avgFreq=parms["mednAvgFreq"],  chAvg= parms["mednChAvg"], \
                                  timeWind=parms["mednTimeWind"],flagVer=2, flagTab=2,flagSig=parms["mednSigma"], \
                                  logfile=logFile, check=check, debug=False)
        if retCode!=0:
            raise RuntimeError("Error in MednFlag")
    
    # Median window frequency editing, for RFI impulsive in frequency
    if parms["doFD1"]:
        mess =  "Median window frequency editing, for RFI impulsive in frequency:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, clist, err, flagVer=2, flagTab=2, doCalib=-1, doBand=-1,   \
                                timeAvg=parms["FD1TimeAvg"], \
                                doFD=True, FDmaxAmp=1.0e20, FDmaxV=1.0e20, FDwidMW=parms["FD1widMW"],  \
                                FDmaxRMS=[1.0e20,0.1], FDmaxRes=parms["FD1maxRes"],  \
                                FDmaxResBL= parms["FD1maxRes"],  FDbaseSel=parms["FD1baseSel"],\
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
           raise  RuntimeError("Error in AutoFlag")
    
    # Parallactic angle correction?
    if parms["doPACor"]:
        retCode = EVLAPACor(uv, err, \
                                logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in Parallactic angle correction")
    
    # Need to find a reference antenna?  See if we have saved it?
    if (parms["refAnt"]<=0):
        refAnt = FetchObject(fileRoot+".refAnt.pickle")
        if refAnt:
            parms["refAnt"] = refAnt

    # Use bandpass calibrator and center half of each spectrum
    if parms["refAnt"]<=0:
        mess = "Find best reference antenna: run Calib on BP Cal(s) "
        printMess(mess, logFile)
        parms["refAnt"] = EVLAGetRefAnt(uv, parms["BPCals"], err, flagVer=0, \
                                        solInt=parms["bpsolint1"], nThreads=nThreads, \
                                        logfile=logFile, check=check, debug=debug)
        if err.isErr:
                raise  RuntimeError("Error finding reference antenna")
        if parms["refAnts"][0]<=0:
            parms["refAnts"][0] = parms["refAnt"]
        mess = "Picked reference antenna "+str(parms["refAnt"])
        printMess(mess, logFile)
        # Save it
        ParmsPicklefile = fileRoot+".Parms.pickle"   # Where results saved
        SaveObject(parms, ParmsPicklefile, True)
        refAntPicklefile = fileRoot+".refAnt.pickle"   # Where results saved
        SaveObject(parms["refAnt"], refAntPicklefile, True)


    # Plot Raw, edited data?
    parms["doRawSpecPlot"]=False
    parms["doSpecPlot"]=False
    if parms["doRawSpecPlot"] and parms["plotSource"]:
        mess =  "Raw Spectral plot for: "+' '.join(parms["BPCal"])
        printMess(mess, logFile)
        plotFile = fileRoot+"_RawSpec.ps"
        retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                               Stokes=["RR","LL"], doband=-1,          \
                               check=check, debug=debug, logfile=logFile )
        if retCode!=0:
            raise  RuntimeError("Error in Plotting spectrum")
        EVLAAddOutFile(plotFile, 'project', 'Pipeline log file' )

    # delay calibration
    if parms["doDelayCal"] and parms["DCals"] and not check:
        plotFile = fileRoot+"_DelayCal.ps"
        retCode = EVLADelayCal(uv, parms["DCals"], err,  \
                               BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                               doCalib=2, flagVer=0, doBand=-1, \
                               solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"],  \
                               refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], 
                               doZeroPhs=parms["delayZeroPhs"], \
                               doPlot=parms["doSNPlot"], plotFile=plotFile, \
                               nThreads=nThreads, noScrat=noScrat, \
                               logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in delay calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and parms["plotSource"]:
            plotFile = fileRoot+"_DelaySpec.ps"
            retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \
                                   plotFile, parms["refAnt"], err, \
                                   Stokes=["RR","LL"], doband=-1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode!=0:
                raise  RuntimeError("Error in Plotting spectrum")
    print(parms["bpBChan1"], parms["bpEChan1"], parms["bpBChan2"], parms["bpEChan2"], parms["bpChWid2"])
    # Bandpass calibration
    if parms["doBPCal"] and parms["BPCals"]:
        retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                            solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                            BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                            BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                            doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                            UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \
                            nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in Bandpass calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and  parms["plotSource"]:
            plotFile = fileRoot+"_BPSpec.ps"
            retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, \
                                   parms["refAnt"], err, Stokes=["RR","LL"], doband=1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode!=0:
                raise  RuntimeError("Error in Plotting spectrum")

    # Amp & phase Calibrate
    if parms["doAmpPhaseCal"]:
        plotFile = fileRoot+"_APCal.ps"
        retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], 
                             doCalib=2, doBand=1, BPVer=1, flagVer=0, \
                             BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                             solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                             doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \
                             ampEditFG=parms["ampEditFG"], \
                             doPlot=parms["doSNPlot"], plotFile=plotFile,  refAnt=parms["refAnt"], \
                             nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        #print parms["ACals"],parms["PCals"]
        if retCode!=0:
            raise RuntimeError("Error calibrating")

    # More editing

    if parms["doAutoFlag"]:
        mess =  "Post calibration editing:"
        printMess(mess, logFile)
        # if going to redo then only calibrators
        if parms["doRecal"]:
            # Only calibrators
            clist = []
            for DCal in parms["DCals"]:
                if DCal["Source"] not in clist:
                    clist.append(DCal["Source"])
            for PCal in parms["PCals"]:
                if PCal["Source"] not in clist:
                    clist.append(PCal["Source"])
            for ACal in parms["ACals"]:
                if ACal["Source"] not in clist:
                    clist.append(ACal["Source"])
        else:
            clist = []

        retCode = EVLAAutoFlag (uv, clist, err, flagVer=0, flagTab =2, \
                                doCalib=2, gainUse=0, doBand=1, BPVer=1,  \
                                IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                doFD=parms["doAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                FDmaxRes=parms["FDmaxRes"],  FDmaxResBL=parms["FDmaxResBL"], \
                                FDbaseSel=parms["FDbaseSel"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
           raise  RuntimeError("Error in AutoFlag")

    # Redo the calibration using new flagging?
    if parms["doBPCal2"]==None:
        parms["doBPCal2"] = parms["doBPCal"]
    if parms["doDelayCal2"]==None:
        parms["doDelayCal2"] = parms["doDelayCal2"]
    if parms["doAmpPhaseCal2"]==None:
        parms["doAmpPhaseCal2"] = parms["doAmpPhaseCal"]
    if parms["doAutoFlag2"]==None:
        parms["doAutoFlagCal2"] = parms["doAutoFlag"]
    if parms["doRecal"]:
        mess =  "Redo calibration:"
        printMess(mess, logFile)
        EVLAClearCal(uv, err, doGain=True, doFlag=False, doBP=True, check=check, logfile=logFile)
        OErr.printErrMsg(err, "Error resetting calibration")
        # Parallactic angle correction?
        if parms["doPACor"]:
            retCode = EVLAPACor(uv, err, \
                                logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error in Parallactic angle correction")

        # Delay recalibration
        if parms["doDelayCal2"] and parms["DCals"] and not check:
            plotFile = fileRoot+"_DelayCal2.ps"
            retCode = EVLADelayCal(uv, parms["DCals"], err, \
                                   BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                                   doCalib=2, flagVer=0, doBand=-1, \
                                   solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"],  \
                                   refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], \
                                   doZeroPhs=parms["delayZeroPhs"], \
                                   doPlot=parms["doSNPlot"], plotFile=plotFile, \
                                   nThreads=nThreads, noScrat=noScrat, \
                                   logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error in delay calibration")

            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = fileRoot+"_DelaySpec2.ps"
                retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                                       Stokes=["RR","LL"], doband=-1,          \
                                       check=check, debug=debug, logfile=logFile )
                if retCode!=0:
                    raise  RuntimeError("Error in Plotting spectrum")

        # Bandpass calibration
        if parms["doBPCal2"] and parms["BPCals"]:
            retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                            solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                            BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                            BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                            doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                            UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \
                            nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error in Bandpass calibration")
        
            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = fileRoot+"_BPSpec2.ps"
                retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                                   Stokes=["RR","LL"], doband=1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode!=0:
                raise  RuntimeError("Error in Plotting spectrum")

        # Amp & phase Recalibrate
        if parms["doAmpPhaseCal2"]:
            plotFile = fileRoot+"_APCal2.ps"
            retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], \
                                 doCalib=2, doBand=2, BPVer=1, flagVer=0, \
                                 BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                                 solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                                 doAmpEdit=True, ampSigma=parms["ampSigma"], \
                                 ampEditFG=parms["ampEditFG"], \
                                 doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \
                                 noScrat=noScrat, nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error calibrating")

        # More editing
        if parms["doAutoFlag2"]:
            mess =  "Post recalibration editing:"
            printMess(mess, logFile)
            retCode = EVLAAutoFlag (uv, [], err, flagVer=0, flagTab=2, \
                                    doCalib=2, gainUse=0, doBand=1, BPVer=1,  \
                                    IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                    doFD=parms["doAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                    FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                    FDmaxRes=parms["FDmaxRes"],  FDmaxResBL= parms["FDmaxResBL"], \
                                    FDbaseSel=parms["FDbaseSel"], \
                                    nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise  RuntimeError("Error in AutoFlag")
    # end recal
    # Calibrate and average data
    if parms["doCalAvg"]:
        retCode = KATCalAvg (uv, avgClass, parms["seq"], parms["CalAvgTime"], err, \
                              flagVer=2, doCalib=2, gainUse=0, doBand=1, BPVer=1, doPol=False, \
                              avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], \
                              BChan=1, EChan=parms["selChan"] - 1, doAuto=parms["doAuto"], \
                              BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
                              nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
           raise  RuntimeError("Error in CalAvg")

    # Get calibrated/averaged data
    if not check:
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), avgClass[0:6], \
                        disk, parms["seq"], True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating cal/avg AIPS data")

    KATUVFITS(uv, 'preimage.uvfits', 0, err, exclude=["AIPS HI", "AIPS SL", "AIPS PL"], include=["AIPS AN", "AIPS FQ"], compress=parms["Compress"], logfile=logFile)
    KATUVFITab(uv, 'preimage.uvtab', 0, err)
    # XClip
    if parms["XClip"] and parms["XClip"]>0.0:
        mess =  "Cross Pol clipping:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, [], err, flagVer=-1, flagTab=1, \
                                doCalib=2, gainUse=0, doBand=-1, maxBad=1.0,  \
                                XClip=parms["XClip"], timeAvg=1./60., \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise  RuntimeError("Error in AutoFlag")
    
    # R-L  delay calibration cal if needed,
    if parms["doRLDelay"] and parms["RLDCal"][0][0]!=None:
        if parms["rlrefAnt"]<=0:
            parms["rlrefAnt"] =  parms["refAnt"]
        # parms["rlDoBand"] if before average, BPVer=parms["rlBPVer"], 
        retCode = EVLARLDelay(uv, err,\
                              RLDCal=parms["RLDCal"], BChan=parms["rlBChan"], \
                              EChan=parms["rlEChan"], UVRange=parms["rlUVRange"], \
                              soucode=parms["rlCalCode"], doCalib=parms["rlDoCal"], gainUse=parms["rlgainUse"], \
                              timerange=parms["rltimerange"], \
                              # NOT HERE doBand=parms["rlDoBand"], BPVer=parms["rlBPVer"],  \
                              flagVer=parms["rlflagVer"], \
                              refAnt=parms["rlrefAnt"], doPol=False,  \
                              nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
                              check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in R-L delay calibration")
    
    # Polarization calibration
    if parms["doPolCal"]:
        if parms["PCRefAnt"]<=0:
            parms["PCRefAnt"] =  parms["refAnt"]
        retCode = EVLAPolCal(uv, parms["PCInsCals"], err, \
                             doCalib=2, gainUse=0, doBand=-1, flagVer=0, \
                             fixPoln=parms["PCFixPoln"], pmodel=parms["PCpmodel"], avgIF=parms["PCAvgIF"], \
                             solInt=parms["PCSolInt"], refAnt=parms["PCRefAnt"], solType=parms["PCSolType"], \
                             ChInc=parms["PCChInc"], ChWid=parms["PCChWid"], \
                             nThreads=nThreads, check=check, debug=debug, noScrat=noScrat, logfile=logFile)
        if retCode!=0 and (not check):
           raise  RuntimeError("Error in polarization calibration: "+str(retCode))
        # end poln cal.
    
    
    # R-L phase calibration cal., creates new BP table
    if parms["doRLCal"] and parms["RLDCal"][0][0]!=None:
        plotFile = fileRoot+"_RLSpec2.ps"
        if parms["rlrefAnt"]<=0:
            parms["rlrefAnt"] =  parms["refAnt"]
        retCode = EVLARLCal(uv, err,\
                            RLDCal=parms["RLDCal"], BChan=parms["rlBChan"],
                            EChan=parms["rlEChan"], UVRange=parms["rlUVRange"], \
                            ChWid2=parms["rlChWid"], solInt1=parms["rlsolint1"], solInt2=parms["rlsolint2"], \
                            RLPCal=parms["RLPCal"], RLPhase=parms["RLPhase"], \
                            RM=parms["RLRM"], CleanRad=parms["rlCleanRad"], \
                            calcode=parms["rlCalCode"], doCalib=parms["rlDoCal"], gainUse=parms["rlgainUse"], \
                            timerange=parms["rltimerange"], FOV=parms["rlFOV"], \
                            doBand=-1, BPVer=1, flagVer=parms["rlflagVer"], \
                            refAnt=parms["rlrefAnt"], doPol=parms["doPol"], PDVer=parms["PDVer"],  \
                            doPlot=parms["doSpecPlot"], plotFile=plotFile, \
                            nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
                            check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in RL phase spectrum calibration")
    
    # VClip
    if parms["VClip"] and parms["VClip"]>0.0:
        mess =  "VPol clipping:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, [], err, flagVer=-1, flagTab=1, \
                                doCalib=2, gainUse=0, doBand=-1,  \
                                VClip=parms["VClip"], timeAvg=parms["timeAvg"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise  RuntimeError("Error in AutoFlag VClip")
    
    # Plot corrected data?
    parms["doSpecPlot"]=True
    if parms["doSpecPlot"] and parms["plotSource"]:
        plotFile = fileRoot+"_Spec.ps"
        retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \
                               plotFile, parms["refAnt"], err, \
                               Stokes=["I"], doband=-1,          \
                               check=check, debug=debug, logfile=logFile )
        if retCode!=0:
            raise  RuntimeError("Error in Plotting spectrum")
    
    
    # Image targets
    if parms["doImage"]:
        # If targets not specified, image all
        if len(parms["targets"])<=0:
            slist = EVLAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
        else:
            slist = targets
        slist=targets
        KATImageTargets (uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, OutlierArea=parms["outlierArea"],\
                          doCalib=-1, doBand=-1,  flagVer=-1, doPol=parms["doPol"], PDVer=parms["PDVer"],  \
                          Stokes=parms["Stokes"], FOV=parms["FOV"], Robust=parms["Robust"], Niter=parms["Niter"], \
                          CleanRad=parms["CleanRad"], minFlux=parms["minFlux"], OutlierSize=parms["OutlierSize"], \
                          xCells=parms["xCells"], yCells=parms["yCells"], Reuse=parms["Reuse"], minPatch=parms["minPatch"], \
                          maxPSCLoop=parms["maxPSCLoop"], minFluxPSC=parms["minFluxPSC"], noNeg=parms["noNeg"], \
                          solPInt=parms["solPInt"], solPMode=parms["solPMode"], solPType=parms["solPType"], \
                          maxASCLoop=parms["maxASCLoop"], minFluxASC=parms["minFluxASC"], nx=parms["nx"], ny=parms["ny"], \
                          solAInt=parms["solAInt"], solAMode=parms["solAMode"], solAType=parms["solAType"], \
                          avgPol=parms["avgPol"], avgIF=parms["avgIF"], minSNR = parms["minSNR"], refAnt=parms["refAnt"], \
                          do3D=parms["do3D"], BLFact=parms["BLFact"], BLchAvg=parms["BLchAvg"], \
                          doMB=parms["doMB"], norder=parms["MBnorder"], maxFBW=parms["MBmaxFBW"], \
                          PBCor=parms["PBCor"],antSize=parms["antSize"], autoCen=parms["autoCen"], \
                          nTaper=parms["nTaper"], Tapers=parms["Tapers"], sefd=sefd, \
                          nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=False)
        # End image
    
    # Get report on sources
    if parms["doReport"]:
        # If targets not specified, do all
        if len(parms["targets"])<=0:
            slist = EVLAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
        else:
            slist = parms["targets"]
        Report = EVLAReportTargets(uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, \
                                       Stokes=parms["Stokes"], logfile=logFile, check=check, debug=debug)
        # Save to pickle jar
        ReportPicklefile = fileRoot+"_Report.pickle"   # Where results saved
        SaveObject(Report, ReportPicklefile, True) 
       
    # Write results, cleanup    
    # Save cal/average UV data? 
    if parms["doSaveUV"] and (not check):
        Aname = EVLAAIPSName(project)
        cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass, disk, parms["seq"], True, err)
            filename = fileRoot+"_Cal.uvtab"
            KATUVFITS (uv, filename, 0, err, exclude=["AIPS HI", "AIPS SL", "AIPS PL"], include=["AIPS AN", "AIPS FQ"], compress=parms["Compress"], logfile=logFile)
            EVLAAddOutFile(os.path.basename(filename), 'project', "Calibrated Averaged UV data" )
            # Save list of output files
            EVLASaveOutFiles(manifestfile)
            del uvt
    # Imaging results
    # If targets not specified, save all
    if len(parms["targets"])<=0:
        slist = EVLAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
    else:
        slist = parms["targets"]
    for target in slist:
        if parms["doSaveImg"] and (not check):
            for s in parms["Stokes"]:
                oclass = s+outIClass[1:]
                outname = target
                # Test if image exists
                cno = AIPSDir.PTestCNO(disk, user, outname, oclass, "MA", parms["seq"], err)
                #print cno
                if cno <= 0 :
                    continue
                x = Image.newPAImage("out", outname, oclass, disk, parms["seq"], True, err)
                outfilefits = fileRoot+'_'+target+"."+oclass+".fits"
                xf = KATImFITS(x, outfilefits, 0, err, logfile=logFile)
                x = Image.newPAImage("out", outname, oclass, disk, parms["seq"], True, err)
                outfile = fileRoot+'_'+target+"."+oclass+".fittab.fits"
                xf = EVLAImFITS (x, outfile, 0, err, logfile=logFile)
                EVLAAddOutFile(outfile, target, 'Image of '+ target)
                # Statistics
                zz=imstat(x, err, logfile=logFile)
                # Make a Jpeg image
                FITS2jpeg.fits2jpeg(outfilefits,chans=1,contrast=0.05,cmap='jet',area=0.7)
                EVLAAddOutFile(outfile.replace('.fits','.jpeg'), target, 'Jpeg image of '+ target)
    # end writing loop
    
    # Save list of output files
    EVLASaveOutFiles(manifestfile)
    OErr.printErrMsg(err, "Writing output")
    
    # Contour plots
    if parms["doKntrPlots"]:
        mess = "INFO --> Contour plots (doKntrPlots)"
        printMess(mess, logFile)
        EVLAKntrPlots( err, imName=parms["targets"], project=fileRoot,
                       disk=disk, debug=debug )
        # Save list of output files
        EVLASaveOutFiles(manifestfile)
    elif debug:
        mess = "Not creating contour plots ( doKntrPlots = "+str(parms["doKntrPlots"])+ " )"
        printMess(mess, logFile)

    # Source uv plane diagnostic plots
    if parms["doDiagPlots"]:
        mess = "INFO --> Diagnostic plots (doDiagPlots)"
        printMess(mess, logFile)
        # Get the highest number avgClass catalog file
        Aname = EVLAAIPSName( project )
        uvc = None
        if not check:
            uvname = project+"_Cal"
            uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True, err)
        EVLADiagPlots( uvc, err, cleanUp=parms["doCleanup"], \
                           project=fileRoot, \
                           logfile=logFile, check=check, debug=debug )
        # Save list of output files
        EVLASaveOutFiles(manifestfile)
    elif debug:
        mess = "Not creating diagnostic plots ( doDiagPlots = "+str(parms["doDiagPlots"])+ " )"
        printMess(mess, logFile)
    
    # Save metadata
    srcMetadata = None
    projMetadata = None
    if parms["doMetadata"]:
        mess = "INFO --> Save metadata (doMetadata)"
        printMess(mess, logFile)
        uvc = None
        if not uvc:
            # Get calibrated/averaged data
            Aname = EVLAAIPSName(project)
            uvname = project+"_Cal"
            uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
    
        # Get source metadata; save to pickle file
        srcMetadata = EVLASrcMetadata( uvc, err, Sources=parms["targets"], seq=parms["seq"], \
                                       sclass=outIClass, Stokes=parms["Stokes"],\
                                       logfile=logFile, check=check, debug=debug )
        picklefile = fileRoot+".SrcReport.pickle" 
        SaveObject( srcMetadata, picklefile, True ) 
        EVLAAddOutFile(os.path.basename(picklefile), 'project', 'All source metadata' )
    
        # Get project metadata; save to pickle file
        projMetadata = KATProjMetadata( uvc, AIPS_VERSION, err, \
            PCals=parms["PCals"], ACals=parms["ACals"], \
            BPCals=parms["BPCals"], DCals=parms["DCals"], \
            project = project, band = band, \
            dataInUVF = parms["archRoot"], archFileID = fileRoot )
        picklefile = fileRoot+".ProjReport.pickle"
        SaveObject(projMetadata, picklefile, True) 
        EVLAAddOutFile(os.path.basename(picklefile), 'project', 'Project metadata' )
    else:
        # Fetch from pickle jar
        picklefile = fileRoot+".SrcReport.pickle"
        srcMetadata = FetchObject(picklefile)
        picklefile = fileRoot+".ProjReport.pickle"
        projMetadata = FetchObject(picklefile)
   
    # Write report
    if parms["doHTML"]:
        mess = "INFO --> Write HTML report (doHTML)"
        printMess(mess, logFile)
        KATHTMLReport( projMetadata, srcMetadata, \
                            outfile=fileRoot+"_report.html", \
                            logFile=logFile )
    
    # Write VOTable
    if parms["doVOTable"]:
        mess = "INFO --> Write VOTable (doVOTable)"
        printMess(mess, logFile)
        EVLAAddOutFile( 'VOTable.xml', 'project', 'VOTable report' ) 
        EVLAWriteVOTable( projMetadata, srcMetadata, filename=fileRoot+'_VOTable.xml' )
    
    # Save list of output files
    EVLASaveOutFiles(manifestfile)
    
    # Cleanup - delete AIPS files
    if parms["doCleanup"] and (not check):
        mess = "INFO --> Clean up (doCleanup)"
        printMess(mess, logFile)
        # Delete target images
        # How many Stokes images
        nstok = len(parms["Stokes"])
        for istok in range(0,nstok):
            oclass = parms["Stokes"][istok:istok+1]+outIClass[1:]
            AllDest(err, disk=disk,Aseq=parms["seq"],Aclass=oclass)
        
        # Delete initial UV data
        Aname = EVLAAIPSName(project)
        # Test if data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, dataClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS RAW UV DATA", Aname, dataClass[0:6], disk, parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting raw AIPS data")
        # Zap calibrated/averaged data
        # Test if data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass[0:6], disk, parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
        # Zap UnHanned data if present
        loadClass = "Raw"
        # Test if image exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, loadClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, loadClass[0:6], disk, parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
        OErr.printErrMsg(err, "Writing output/cleanup")


    # Delete AIPS scratch DA00 and disk
    if os.path.exists(os.environ['DA00']): shutil.rmtree(os.environ['DA00'])
    for disk in ObitTalkUtil.AIPSDir.AIPSdisks:
        if os.path.exists(disk): shutil.rmtree(disk)


    # Shutdown
    mess = "Finished project "+parms["project"]+ \
    " AIPS user no. "+str(AIPS.userno)
    printMess(mess, logFile)
    OErr.printErr(err)
    OSystem.Shutdown(ObitSys)
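
A hypothetical invocation of MKContPipeline, sketched under the assumption that AIPS, Obit and the MeerKAT pipeline environment are already configured; the file name, output directory and keyword values are illustrative only.

MKContPipeline(["1548939342_sdp_l0.h5"], "/data/reductions/1548939342",
               scratchdir="/scratch/aipsdisks",
               configFile="katimrc.cfg",
               parmFile="myparms.py",
               targets="PKS1934-63,PictorA",
               flags="extra_flags.h5")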
Ejemplo n.º 13
0
def NVSSFindFile(RA, Dec, equinox, err, stokes='I'):
    """ Determine the NVSS image best suited for a given position

    Returns name of image
    RA       = Right ascension as string ("HH MM SS.SS")
    Dec      = Declination as string ("sDD MM SS.SS")
    equinox  = equinox of RA,Dec, 1950 or 2000
    err      = Python Obit Error/message stack
    stokes   = Stokes desired, 'I', 'Q' or 'U'
    """
    ################################################################
    # Checks
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    if err.isErr: # existing error?
        return
    if not isinstance(RA, str):
        raise TypeError("RA must be a string (HH MM SS.S)")
    if not isinstance(Dec, str):
        raise TypeError("Dec must be a string (sDD MM SS.S)")
    #
    ra  = ImageDesc.PHMS2RA(RA, sep=' ')
    dec = ImageDesc.PDMS2Dec(Dec, sep=' ')
    # Precess 1950 to 2000?
    if equinox==1950:
        (ra,dec) = SkyGeom.PBtoJ(ra,dec)

    # Must be north of -41 dec
    if dec<-41.:
        OErr.PLog(err, OErr.Error, "Dec MUST be > -41")
        return "No Image"

    # Maximum distance to "belong"
    tolDec = 2.0
    idec   = -1
    adec   = abs(dec)
    for i in range(0,23):
        if abs(adec-NVSSnumMap[i][0])<=tolDec:
            dd = int(NVSSnumMap[i][0]+0.01)
            idec = i
            break
    # Find it?
    if idec<0:
        OErr.PLog(err, OErr.Error, "Failed on declination "+Dec)
        return "No Image"
    # Width of images in RA
    deltRa = 360.0 / NVSSnumMap[idec][1]
    racell = int(0.5 + ra/deltRa)  # Cell
    if racell>=NVSSnumMap[idec][1]: # Wrap?
        racell = 0
    # Derive name
    raCen = racell*deltRa / 15.0
    iRAH = int(raCen + 0.5)
    iRAM = int(60.0 * (raCen-iRAH) + 0.1)
    if iRAM<0.0:
        iRAH -= 1
        iRAM += 59
    if iRAH == -1:
        iRAH = 23
    # Sign of declination
    if dec>=-2.0:
        dsign = 'P'
    else:
        dsign = 'M'
    # Stokes type
    if stokes in ["Q","U"]:
        itype = 'C'
    else:
        itype = 'I'
    # put it together
    file = "%s%2.2i%2.2i%s%2.2i"%(itype,iRAH,iRAM,dsign,dd)
    return file
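
A hypothetical lookup with NVSSFindFile; the coordinates are made up and err is assumed to be an initialised OErr stack.

fname = NVSSFindFile("12 30 49.4", "+12 23 28.0", 2000, err, stokes='I')
# From the format string above, fname has the form
# itype + RA hours + RA minutes + P/M + declination strip, e.g. "I1230P12".
print("NVSS image:", fname)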
Ejemplo n.º 14
0
def KAT2AIPS (katdata, outUV, disk, fitsdisk, err, \
              calInt=1.0, static=None, **kwargs):
    """Convert MeerKAT MVF data set to an Obit UV.

    This module requires katdal and katpoint and their dependencies;
    contact Ludwig Schwardt <*****@*****.**> for details.

    Parameters
    ----------
    katdata : katdal.DataSet
        input katdal object
    outUV : UV.UV
        Obit UV object, should be a KAT template for the
        appropriate number of IFs and poln.
    disk  : int
        AIPS Disk number
    fitsdisk: int
        FITS Disk number
    err : OErr.OErr
        Obit error/message stack
    calInt : float
        Calibration interval in min.
    targets : list, optional
        List of target names to extract from the file
    stop_w : bool
        Fringe stop the data? (valid for KAT-7 only)
    """
    ################################################################
    OErr.PLog(err, OErr.Info, "Converting MVF data to AIPS UV format.")
    OErr.printErr(err)
    print("Converting MVF data to AIPS UV format.\n")

    # Extract metadata
    meta = GetKATMeta(katdata, err)

    # TODO: Fix this all up so that the below isn't the case!
    if meta["products"].size != meta["nants"] * meta["nants"] * 4:
        raise ValueError(
            "Only full stokes and all correlation products are supported.")

    # Extract AIPS parameters of the uv data to the metadata
    meta["Aproject"] = outUV.Aname
    meta["Aclass"] = outUV.Aclass
    meta["Aseq"] = outUV.Aseq
    meta["Adisk"] = disk
    meta["calInt"] = calInt
    meta["fitsdisk"] = fitsdisk
    # Update descriptor
    UpdateDescriptor(outUV, meta, err)
    # Write AN table
    WriteANTable(outUV, meta, err)
    # Write FQ table
    WriteFQTable(outUV, meta, err)
    # Write SU table
    WriteSUTable(outUV, meta, err)

    # Convert data
    ConvertKATData(outUV,
                   katdata,
                   meta,
                   err,
                   static=static,
                   blmask=kwargs.get('blmask', 1.e10),
                   stop_w=kwargs.get('stop_w', False),
                   timeav=kwargs.get('timeav', 1),
                   flag=kwargs.get('flag', False),
                   doweight=kwargs.get('doweight', True),
                   doflags=kwargs.get('doflags', True))

    # Index data
    OErr.PLog(err, OErr.Info, "Indexing data")
    OErr.printErr(err)
    UV.PUtilIndex(outUV, err)

    # Open/close UV to update header
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # initial CL table
    OErr.PLog(err, OErr.Info, "Create Initial CL table")
    OErr.printErr(err)
    print("Create Initial CL table\n")
    UV.PTableCLfromNX(outUV, meta["maxant"], err, calInt=calInt)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # History
    outHistory = History.History("outhistory", outUV.List, err)
    outHistory.Open(History.READWRITE, err)
    outHistory.TimeStamp("Convert MeerKAT MVF data to Obit", err)
    for name in katdata.name.split(','):
        for line in _history_wrapper.wrap("datafile = " + name):
            outHistory.WriteRec(-1, line, err)
    outHistory.WriteRec(-1, "calInt   = " + str(calInt), err)
    outHistory.Close(err)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")
    # Return the metadata for the pipeline
    return meta
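
A hypothetical call to KAT2AIPS is sketched below; it assumes katdata is an open katdal dataset, uv is a matching AIPS UV template (as produced by the uvlod step in the pipeline above) and err is an initialised OErr stack.

meta = KAT2AIPS(katdata, uv, disk=1, fitsdisk=0, err=err,
                calInt=1.0, stop_w=False, timeav=1, flag=False)
print("Converted data for %d antennas" % meta["nants"])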
Ejemplo n.º 15
0
def ConvertKATData(outUV,
                   katdata,
                   meta,
                   err,
                   static=None,
                   blmask=1.e10,
                   stop_w=False,
                   timeav=1,
                   flag=False,
                   doweight=True,
                   doflags=True):
    """
    Read KAT HDF data and write Obit UV

     * outUV    = Obit UV object
     * katdata  = input KAT dataset
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    reffreq = meta["spw"][0][1]  # reference frequency
    lamb = 2.997924562e8 / reffreq  # wavelength of reference freq
    nchan = meta["spw"][0][0]  # number of channels
    nif = len(meta["spw"])  # Number of IFs
    nstok = meta["nstokes"]  # Number of Stokes products
    newants = meta["newants"]
    p = meta["products"]  # baseline stokes indices
    b = meta["baselines"]
    bi = meta["blineind"]
    nbase = b.shape[0]  # number of correlations/baselines
    nprod = nbase * nstok
    antslookup = meta["antLookup"]
    # work out Start time in unix sec
    tm = katdata.timestamps[0]
    tx = time.gmtime(tm)
    time0 = tm - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]

    # Set data to read one timestamp per IO
    outUV.List.set("nVisPIO", nbase)
    d = outUV.Desc.Dict
    d.update(numVisBuff=nbase)
    outUV.Desc.Dict = d
    # Open data
    zz = outUV.Open(UV.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening output UV")
    # visibility record offsets
    idb = {}
    idb['ilocu'] = d['ilocu']
    idb['ilocv'] = d['ilocv']
    idb['ilocw'] = d['ilocw']
    idb['iloct'] = d['iloct']
    idb['ilocb'] = d['ilocb']
    idb['ilocsu'] = d['ilocsu']
    idb['nrparm'] = d['nrparm']

    visshape = nchan * nstok * 3
    count = 0.0
    # Get IO buffers as numpy arrays
    buff = numpy.frombuffer(outUV.VisBuf, dtype=numpy.float32)
    #Set up a flagger if needs be
    if flag:
        flagger = SumThresholdFlagger(
            outlier_nsigma=4.5,
            freq_chunks=7,
            spike_width_freq=1.5e6 / katdata.channel_width,
            spike_width_time=100. / katdata.dump_period,
            time_extend=3,
            freq_extend=3,
            average_freq=1)
    #Set up the baseline mask
    blmask = get_baseline_mask(newants, katdata.corr_products, blmask)

    # Template vis
    vis = outUV.ReadVis(err, firstVis=1)
    first = True
    visno = 1
    numflags = 0
    numvis = 0
    # Do we need to stop Fringes
    if stop_w:
        msg = "W term in UVW coordinates will be used to stop the fringes."
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print(msg)

    # Set up baseline vectors of uvw calculation
    array_centre = katpoint.Antenna('', *newants[0].ref_position_wgs84)
    baseline_vectors = numpy.array(
        [array_centre.baseline_toward(antenna) for antenna in newants])

    max_scan = 151
    QUACK = 1
    # Generate arrays for storage
    scan_vs = numpy.empty((max_scan, nchan, nprod), dtype=katdata.vis.dtype)
    scan_fg = numpy.empty((max_scan, nchan, nprod), dtype=katdata.flags.dtype)
    scan_wt = numpy.empty((max_scan, nchan, nprod),
                          dtype=katdata.weights.dtype)
    for scan, state, target in katdata.scans():
        # Don't read at all if all will be "Quacked"
        if katdata.shape[0] < ((QUACK + 1) * timeav):
            continue
        # Chunk data into max_scan dumps
        if katdata.shape[0] > max_scan:
            scan_slices = [
                slice(i, i + max_scan, 1)
                for i in range(QUACK * timeav, katdata.shape[0], max_scan)
            ]
            scan_slices[-1] = slice(scan_slices[-1].start, katdata.shape[0], 1)
        else:
            scan_slices = [slice(QUACK * timeav, katdata.shape[0])]

        # Number of integrations
        num_ints = katdata.timestamps.shape[0] - QUACK * timeav
        msg = "Scan:%4d Int: %4d %16s Start %s" % (
            scan, num_ints, target.name,
            day2dhms((katdata.timestamps[0] - time0) / 86400.0)[0:12])
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print(msg)
        for sl in scan_slices:
            tm = katdata.timestamps[sl]
            nint = tm.shape[0]
            load(katdata, numpy.s_[sl.start:sl.stop, :, :], scan_vs[:nint],
                 scan_wt[:nint], scan_fg[:nint], err)
            # Make sure we've reset the weights
            wt = scan_wt[:nint]
            if not doweight:
                wt[:] = 1.
            vs = scan_vs[:nint]
            fg = scan_fg[:nint]
            if not doflags:
                fg[:] = False
            if static is not None:
                fg[:, :, blmask] |= static[numpy.newaxis, :, numpy.newaxis]
            if flag:
                fg |= flag_data(vs, fg, flagger)
            if timeav > 1:
                vs, wt, fg, tm, _ = averager.average_visibilities(
                    vs,
                    wt,
                    fg,
                    tm,
                    katdata.channel_freqs,
                    timeav=int(timeav),
                    chanav=1)
                # Update number of integrations for averaged data.
                nint = tm.shape[0]
            # Get target suid
            # Only on targets in the input list
            try:
                suid = meta["targLookup"][target.name[0:16]]
            except:
                continue

            numflags += numpy.sum(fg)
            numvis += fg.size

            # uvw calculation
            uvw_coordinates = get_uvw_coordinates(array_centre,
                                                  baseline_vectors, tm, target,
                                                  bi)

            # Convert to aipsish
            uvw_coordinates /= lamb

            # Convert to AIPS time
            tm = (tm - time0) / 86400.0

            #Get random parameters for this scan
            rp = get_random_parameters(idb, b, uvw_coordinates, tm, suid)
            # Loop over integrations
            for iint in range(0, nint):
                # Fill the buffer for this integration
                buff = fill_buffer(vs[iint], fg[iint], wt[iint], rp[iint], p,
                                   bi, buff)
                # Write to disk
                outUV.Write(err, firstVis=visno)
                visno += nbase
                firstVis = None
            # end loop over integrations
            if err.isErr:
                OErr.printErrMsg(err, "Error writing data")
    # end loop over scan
    if numvis > 0:
        msg = "Applied %s online flags to %s visibilities (%.3f%%)" % (
            numflags, numvis, (float(numflags) / float(numvis) * 100.))
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing data")