Example #1
def PTestCNO(disk, user, Aname, Aclass, Atype, seq, err):
    """ Test if AIPS file exists

    returns AIPS cno, -1 => not found
    disk     = AIPS disk number
    user     = AIPS user number
    Aname    = AIPS file name
    Aclass   = AIPS class name
    Atype    = 'MA' or 'UV' for image or uv data
    seq      = AIPS sequence number
    err      = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if err.isErr:
        return -1
    if not OErr.OErrIsA(err):
        raise TypeError,"err MUST be an OErr"
    #
    # Print message stack to clear
    OErr.printErr(err)
    ret = Obit.AIPSDirFindCNO(disk, user, Aname, Aclass, Atype, seq, err.me)
    if err.isErr:
        return ret
    # Clear any couldn't find message
    OErr.PClear(err)
    return ret
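
A minimal usage sketch (not part of the original example): it assumes an Obit/ObitTalk session has already been started and that OErr.OErr() creates the error/message stack; the disk, user and file names below are hypothetical.

err = OErr.OErr()
# Look for AIPS image MYMAP.ICL001 seq 1 on disk 1 under user 100
cno = PTestCNO(1, 100, "MYMAP", "ICL001", "MA", 1, err)
if cno < 0:
    print("AIPS file not found")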
Example #2
def MakeMask(im, vlver, maskFile, err, minSNR=5.0):
    """
    Create a CLEAN mask file for the Obit imaging tasks' CLEANFile input.
    Radius of each entry = major axis size.

    im       = Image with VL table
    vlver    = VL Table version, generate with FndSou
    maskFile = name for output mask file
    err      = Obit error/message stack
    minSNR   = Min. SNR to use
    """
    scale = 1.0
    # Open output file
    fd = open(maskFile, 'w')

    vltab = im.NewTable(Table.READONLY, 'AIPS VL', vlver, err)
    maxscale = scale / abs(im.Desc.Dict['cdelt'][0])
    nrow = vltab.Desc.Dict['nrow']
    vltab.Open(Table.READONLY, err)
    OErr.printErr(err)
    for irow in range(1, nrow + 1):
        row = vltab.ReadRow(irow, err)
        if row['PEAK INT'][0] / row['I RMS'][0] > minSNR:
            ra = ImageDesc.PRA2HMS(row['RA(2000)'][0])
            dec = ImageDesc.PDec2DMS(row['DEC(2000)'][0])
            size = min(50, int(row['MAJOR AX'][0] * maxscale + 0.5))
            line = "%s %s %d\n" % (ra, dec, size)
            fd.write(line)

    vltab.Close(err)
    OErr.printErr(err)
    fd.close()
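
A hedged usage sketch: 'img' stands for a hypothetical Obit Image that already carries a VL table (version 1 here), e.g. one produced by the FndSou task, and a running Obit session is assumed.

err = OErr.OErr()
MakeMask(img, 1, "clean_mask.txt", err, minSNR=6.0)   # one "RA Dec radius" line per accepted source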
Example #3
def PEdit (inHis, startr, endr, err):
    """
    Edit History
    
    Deletes a range of history records.
    return 0 on success, else failure

    * inHis    = input Python History
    * startr   = first (1-rel) history record to delete
    * endr     = highest (1-rel) history record to delete, 0=>to end
    * err      = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not PIsA(inHis):
        raise TypeError("inHis MUST be a History")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    ret = POpen (inHis, READWRITE, err)
    ret = Obit.HistoryEdit(inHis.me, startr, endr, err.me)
    OErr.printErr(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error Editing History")
    ret = PClose (inHis, err)
    return ret
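
A short usage sketch under the same assumptions as above; 'uv' is a hypothetical Obit data object whose history is being trimmed, using the History.History constructor seen elsewhere on this page.

err = OErr.OErr()
inHis = History.History("history", uv.List, err)
ret = PEdit(inHis, 5, 0, err)   # delete history records 5 through the end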
Example #4
def PTestCNO(disk, user, Aname, Aclass, Atype, seq, err):
    """
    Test if AIPS file exists
    
    returns AIPS cno, -1 => not found

    * disk     = AIPS disk number
    * user     = AIPS user number
    * Aname    = AIPS file name
    * Aclass   = AIPS class name
    * Atype    = 'MA' or 'UV' for image or uv data
    * seq      = AIPS sequence number
    * err      = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if err.isErr:
        return -1
    if not OErr.OErrIsA(err):
        raise TypeError, "err MUST be an OErr"
    #
    # Print message stack to clear
    OErr.printErr(err)
    ret = Obit.AIPSDirFindCNO(disk, user, Aname, Aclass, Atype, seq, err.me)
    if err.isErr:
        return ret
    # Clear any couldn't find message
    OErr.PClear(err)
    return ret
Example #5
def VLAUVLoad(filename, inDisk, Aname, Aclass, Adisk, Aseq, err, logfile=''):
    """ Read FITS uvtab file into AIPS

    Read a UVTAB FITS UV data file and write an AIPS data set
    filename   = name of FITS file
    inDisk     = FITS directory number
    Aname      = AIPS name of file
    Aclass     = AIPS class of file
    Aseq       = AIPS sequence number of file, 0=> create new
    Adisk      = AIPS disk number
    err        = Python Obit Error/message stack
    logfile    = logfile for messages
    returns AIPS UV data object
    """
    ################################################################
    #
    # Checks
    if not OErr.OErrIsA(err):
        raise TypeError,"err MUST be an OErr"
    #
    # Get input
    inUV = UV.newPFUV("FITS UV DATA", filename, inDisk, True, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error with FITS data")
    # Get output, create new if seq=0
    if Aseq<1:
        OErr.printErr(err)   # Print any outstanding messages
        user = OSystem.PGetAIPSuser()
        Aseq=AIPSDir.PHiSeq(Adisk,user,Aname,Aclass,"MA",err)
        # If it already exists, increment seq
        if AIPSDir.PTestCNO(Adisk,user,Aname,Aclass,"MA",Aseq,err)>0:
            Aseq = Aseq+1
        OErr.PClear(err)     # Clear any message/error
    mess = "Creating AIPS UV file "+Aname+"."+Aclass+"."+str(Aseq)+" on disk "+str(Adisk)
    printMess(mess, logfile)
    outUV = UV.newPAUV("AIPS UV DATA", Aname, Aclass, Adisk, Aseq, False, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error creating AIPS data")
    # Copy
    UV.PCopy (inUV, outUV, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error copying UV data to AIPS")
    # Copy History
    inHistory  = History.History("inhistory",  inUV.List, err)
    outHistory = History.History("outhistory", outUV.List, err)
    History.PCopyHeader(inHistory, outHistory, err)
    # Add history
    outHistory.Open(History.READWRITE, err)
    outHistory.TimeStamp(" Start Obit uvlod",err)
    outHistory.WriteRec(-1,"uvlod   / FITS file "+filename+" disk "+str(inDisk),err)
    outHistory.Close(err)
    #
    # Copy Tables
    exclude=["AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL", "History"]
    include=[]
    UV.PCopyTables (inUV, outUV, exclude, include, err)
    return outUV  # return new object
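
A hedged invocation sketch: the FITS file name, AIPS name/class and disk numbers are placeholders, and an Obit/AIPS session with the given disks is assumed.

err = OErr.OErr()
uv = VLAUVLoad("myproject.uvtab", 0, "MYPROJ", "UVData", 1, 0, err, logfile="load.log")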
Example #6
def WriteFGTable(outUV, katdata, meta, err):
    """
    Get the flags from the h5 file and convert to FG table format.
    UNUSED - This implementation is too slow!
    outUV    = Obit UV object
    katdata  = input KAT dataset
    meta     = dict with data meta data
    err      = Python Obit Error/message stack to init
    """
    ###############################################################

    # work out Start time in unix sec
    tm = katdata.timestamps[1:2]
    tx = time.gmtime(tm[0])
    time0 = tm[0] - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]
    int_time = katdata.dump_period

    #Loop through scans in h5 file
    row = 0
    for scan, state, target in katdata.scans():
        name = target.name.replace(' ', '_')
        if state != 'track':
            continue
        tm = katdata.timestamps[:]
        nint = len(tm)
        el = target.azel(tm[int(nint / 2)])[1] * 180. / math.pi
        if el < 15.0:
            continue
        row += 1
        flags = katdata.flags()[:]
        numflag = 0
        for t, chan_corr in enumerate(flags):
            for c, chan in enumerate(chan_corr):
                cpflagged = []
                for p, cp in enumerate(chan):
                    #for cpaverage in meta['pairLookup']:
                    flag = cp  #numpy.any(chan[meta['pairLookup'][cpaverage]])
                    product = meta['products'][p]
                    if product[0] == product[1]:
                        continue
                    if flag and (not product[0:2] in cpflagged):
                        cpflagged.append(product[0:2])
                        numflag += 1
                        starttime = float(
                            (tm[t] - time0 - (int_time / 2)) / 86400.0)
                        endtime = float(
                            (tm[t] - time0 + (int_time / 2)) / 86400.0)
                        UV.PFlag(outUV,err,timeRange=[starttime,endtime], flagVer=1, \
                                     Ants=[product[0],product[1]], \
                                     Chans=[c+1,c+1], IFs=[1,1], Stokes='1111', Reason='Online flag')
        numvis = t * c * (p / meta["nstokes"])
        msg = "Scan %4d %16s   Online flags: %7s of %8s vis" % (
            row, name, numflag, numvis)
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
Example #7
def load(dataset, indices, vis, weights, flags, err):
    """Load data from lazy indexers into existing storage.
    This is optimised for the MVF v4 case where we can use dask directly
    to eliminate one copy, and also load vis, flags and weights in parallel.
    In older formats it causes an extra copy.
    Parameters
    ----------
    dataset : :class:`katdal.DataSet`
        Input dataset, possibly with an existing selection
    indices : tuple
        Slice expression for subsetting the dataset
    vis, weights, flags : array-like
        Outputs, which must have the correct shape and type
    err : OErr
        Obit error/message stack
    """

    t_min = indices[0].start
    t_max = indices[0].stop
    in_time_slices = [
        slice(ts, min(ts + CHUNK_SIZE, t_max))
        for ts in range(t_min, t_max, CHUNK_SIZE)
    ]
    for in_ts in in_time_slices:
        out_ts = slice(in_ts.start - t_min, in_ts.stop - t_min)
        out_vis = vis[out_ts]
        out_weights = weights[out_ts]
        out_flags = flags[out_ts]
        for i in range(NUM_RETRIES):
            try:
                if isinstance(dataset.vis, DaskLazyIndexer):
                    DaskLazyIndexer.get(
                        [dataset.vis, dataset.weights, dataset.flags],
                        in_ts,
                        out=[out_vis, out_weights, out_flags])
                else:
                    out_vis[:] = dataset.vis[in_ts]
                    out_weights[:] = dataset.weights[in_ts]
                    out_flags[:] = dataset.flags[in_ts]
                break
            except (StoreUnavailable, socket.timeout):
                msg = 'Timeout when reading dumps %d to %d. Try %d/%d....' % (
                    out_ts.start + 1, out_ts.stop, i + 1, NUM_RETRIES)
                OErr.PLog(err, OErr.Warn, msg)
                OErr.printErr(err)
                print(msg)
        else:
            # Retries exhausted without reading the data: flag it and warn
            msg = 'Too many timeouts, flagging dumps %d to %d' % (
                out_ts.start + 1, out_ts.stop)
            OErr.PLog(err, OErr.Warn, msg)
            OErr.printErr(err)
            print(msg)
            flags[out_ts] = True
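
The chunking above is plain Python and can be illustrated standalone; the numbers below are arbitrary and only show how the dump range [t_min, t_max) is split into CHUNK_SIZE pieces.

CHUNK_SIZE = 4
t_min, t_max = 10, 23
slices = [slice(ts, min(ts + CHUNK_SIZE, t_max))
          for ts in range(t_min, t_max, CHUNK_SIZE)]
print(slices)   # [slice(10, 14), slice(14, 18), slice(18, 22), slice(22, 23)]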
Example #8
def WriteFQTable(outUV, meta, err):
    """
    Write data in meta to FQ table
    An old FQ table is deleted

     * outUV    = Obit UV object
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    # If an old table exists, delete it
    if outUV.GetHighVer("AIPS FQ") > 0:
        zz = outUV.ZapTable("AIPS FQ", 1, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error zapping old FQ table")
    reffreq = meta["spw"][0][1]  # reference frequency
    noif = 1  # Number of IFs (1 always for KAT7)
    fqtab = outUV.NewTable(Table.READWRITE, "AIPS FQ", 1, err, numIF=noif)
    if err.isErr:
        OErr.printErrMsg(err, "Error with FQ table")
    fqtab.Open(Table.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening FQ table")
    # Update header
    fqtab.keys['NO_IF'] = 1  # Structural so no effect
    Table.PDirty(fqtab)  # Force update
    # Create row
    row = {'FRQSEL': [1], 'CH WIDTH': [0.0], 'TOTAL BANDWIDTH': [0.0], \
           'RXCODE': ['L'], 'SIDEBAND': [-1], 'NumFields': 7, 'Table name': 'AIPS FQ', \
           '_status': [0], 'IF FREQ': [0.0]}
    if err.isErr:
        OErr.printErrMsg(err, "Error reading FQ table")
    OErr.printErr(err)
    irow = 0
    for sw in meta["spw"]:
        irow += 1
        row['FRQSEL'] = [irow]
        row['IF FREQ'] = [sw[1] - reffreq]
        row['CH WIDTH'] = [sw[2]]
        row['TOTAL BANDWIDTH'] = [abs(sw[2]) * sw[0]]
        row['RXCODE'] = ['L']
        if sw[2] > 0.0:
            row['SIDEBAND'] = [1]
        else:
            row['SIDEBAND'] = [-1]
        fqtab.WriteRow(irow, row, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error writing FQ table")
    fqtab.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing FQ table")
Example #9
    def close(self):
        """
        Shutdown the Obit System, logging any errors on the error stack
        """

        # Remove defined AIPS & FITS dirs from environment to prevent
        # overloading the list of defined disks when multiple Obit
        # environments are constructed in a single python session
        # (eg. during the unit tests).
        AIPS.AIPS.disks = [None]
        AIPSDir.AIPSdisks = []

        if self.err.isErr:
            OErr.printErr(self.err)

        OSystem.Shutdown(self.obitsys)
Example #10
def PCreateByNumber (name, inUV, SouID, err):
    """ Create a Source from a UV for a specified Source ID

    If inUV has a Source table the Source is extracted from it, otherwise
    from the information in the descriptor
     * name   = Name for object
     * inUV   = UV data for source info
     * SouID  = Source identifier in inUV
     * err    = Python Obit Error/message stack
      returns  new Source object
    """
    ################################################################
    out    = Source("None")
    out.me = Obit.SourceCreateByNumber(name, inUV.me, SouID, err.me)
    if err.isErr:
        OErr.printErr(err)
        raise RuntimeError("Failed")
    return out
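
A usage sketch, assuming 'inUV' is a hypothetical Obit UV object already attached in the session and that source ID 1 exists in its source table.

err = OErr.OErr()
src = PCreateByNumber("target", inUV, 1, err)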
Example #11
def WriteSUTable(outUV, meta, err):
    """
    Write data in meta to SU table

     * outUV    = Obit UV object
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    sutab = outUV.NewTable(Table.READWRITE, "AIPS SU", 1, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error with SU table")
    sutab.Open(Table.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening SU table")
    # Update header
    sutab.keys['RefDate'] = meta["obsdate"]
    sutab.keys['Freq'] = meta["spw"][0][1]
    Table.PDirty(sutab)  # Force update
    row = sutab.ReadRow(1, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error reading SU table")
    OErr.printErr(err)
    irow = 0
    for tar in meta["targets"]:
        irow += 1
        row['ID. NO.'] = [tar[0]]
        row['SOURCE'] = [tar[1]]
        row['RAEPO'] = [tar[2]]
        row['DECEPO'] = [tar[3]]
        row['RAOBS'] = [tar[2]]
        row['DECOBS'] = [tar[3]]
        row['EPOCH'] = [2000.0]
        row['RAAPP'] = [tar[4]]
        row['DECAPP'] = [tar[5]]
        row['BANDWIDTH'] = [meta["spw"][0][2]]
        sutab.WriteRow(irow, row, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error writing SU table")
    sutab.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing SU table")
Example #12
def PWriteRow (inTab, rowno, rowDict, err):
    """ Write an image  persistent (disk) form from a specified Dict

    Writes a single row
    inTab     = Python Table object
    rowno     = row number (1-rel) to write
    rowDict   = Python Dict of same form as returned by PReadRow
    err       = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not inTab.IsA():
        raise TypeError("inTab MUST be a Python Obit Table")
    if not err.IsA():
        raise TypeError("err MUST be an OErr")
    if err.isErr: # existing error?
        return
    #
    Obit.TableWriteRow (inTab.me, rowno, rowDict, err.me)
    OErr.printErr(err)
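
A hedged read-modify-write sketch using the same row-dict convention as the table examples on this page; 'tab' is a hypothetical Obit Table already created with NewTable.

err = OErr.OErr()
tab.Open(Table.READWRITE, err)
row = tab.ReadRow(1, err)          # dict keyed by column name
row['SOURCE'] = ['3C286           ']
PWriteRow(tab, 1, row, err)
tab.Close(err)
OErr.printErr(err)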
Example #13
def WriteANTable(outUV, meta, err):
    """
    Write data in meta to AN table

     * outUV    = Obit UV object
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    antab = outUV.NewTable(Table.READWRITE, "AIPS AN", 1, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error with AN table")
    antab.Open(Table.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening AN table")
    # Update header
    antab.keys['RefDate'] = meta["obsdate"]
    antab.keys['Freq'] = meta["spw"][0][1]
    JD = UVDesc.PDate2JD(meta["obsdate"])
    antab.keys['GSTiat0'] = UVDesc.GST0(JD) * 15.0
    antab.keys['DEGPDY'] = UVDesc.ERate(JD) * 360.0
    Table.PDirty(antab)  # Force update

    row = antab.ReadRow(1, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error reading AN table")
    OErr.printErr(err)
    irow = 0
    for ant in meta["ants"]:
        irow += 1
        row['NOSTA'] = [ant[0]]
        row['ANNAME'] = [ant[1] + "    "]
        row['STABXYZ'] = [ant[2], ant[3], ant[4]]
        row['DIAMETER'] = [ant[5]]
        row['POLAA'] = [90.0]
        antab.WriteRow(irow, row, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error writing AN table")
    antab.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing AN table")
Example #14
def NVSSPtFlux(RA, Dec, equinox, err, stokes='I',dir="/home/ftp/nvss/MAPS"):
    """ Determine the NVSS Flux density

    Returns flux density, None on failure
    RA       = Right ascension as string ("HH MM SS.SS")
    Dec      = Declination as string ("sDD MM SS.SS")
    equinox  = equinox of RA,Dec, 1950 or 2000
    err      = Python Obit Error/message stack
    stokes   = Stokes desired, 'I', 'Q' or 'U'
    dir      = directory or url of directory
    """
    ################################################################
    # Checks
    if not OErr.OErrIsA(err):
        raise TypeError,"err MUST be an OErr"
    if err.isErr: # existing error?
        return None
    #
    file = NVSSFindFile(RA, Dec, equinox, err, stokes=stokes)
    if err.isErr:
        OErr.printErr(err)
        return None
    path = dir+"/"+file
    #print "Image ",path
    img = Image.newPFImage("Image", path, 0, True, err, verbose=False)
    if err.isErr:
        OErr.printErr(err)
        return None
    interp = ImageInterp.PCreate("Interpolator", img, err)
    if err.isErr:
        OErr.printErr(err)
        return None
    ra  = ImageDesc.PHMS2RA(RA, sep=' ')
    dec = ImageDesc.PDMS2Dec(Dec, sep=' ')
    # Precess 1950 to 2000?
    if equinox==1950:
        (ra,dec) = SkyGeom.PBtoJ(ra,dec)
    # Plane
    plane = 1
    if stokes=='Q':
        plane = 2
    if stokes=='U':
        plane = 3
    # Interpolate
    value = interp.Value(ra, dec, err, plane=plane)
    if err.isErr:
        OErr.printErr(err)
        return None
    return value
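
An illustrative call (assuming the default NVSS image directory exists locally); the coordinates are roughly those of 3C273 and are only an example.

err = OErr.OErr()
flux = NVSSPtFlux("12 29 06.70", "+02 03 08.6", 2000, err, stokes='I')
if flux is not None:
    print("NVSS flux density: %g Jy" % flux)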
Example #15
def PSingle(nterm, refFreq, freq, flux, sigma, err, doBrokePow=False):
    """  Fit single spectrum to flux measurements
    
    Does error analysis and makes primary beam correction
    Returns an array of fitted parameters, errors for each, and Chi Squared of fit.
             Initial terms are in Jy, others in log.
    nterm   = Number of coefficients of powers of log(nu) to fit
    refFreq = Reference frequency for fit (Hz)
    freq    = Array of Frequencies (Hz)
    flux    = Array of fluxes (Jy) same dim as freq
    sigma   = Array of errors (Jy) same dim as freq
    err     = Obit error stack
    doBrokePow = If true do broken power law (3 terms)
    """
    ################################################################
    #
    nfreq = len(freq)
    ret = Obit.SpectrumFitSingle(nfreq, nterm, refFreq, freq, flux, sigma, \
                                 doBrokePow, err.me)
    OErr.printErr(err)
    OErr.printErrMsg(err, "Fitting failed")
    return ret
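
A small sketch with made-up measurements, fitting a two-term (power-law) spectrum referenced to 4.86 GHz; a running Obit session is assumed.

err = OErr.OErr()
freq  = [1.4e9, 4.86e9, 8.46e9]      # Hz
flux  = [1.20, 0.85, 0.66]           # Jy (illustrative values)
sigma = [0.05, 0.04, 0.04]           # Jy
result = PSingle(2, 4.86e9, freq, flux, sigma, err)
# result holds the fitted terms, their errors and the Chi Squared of the fit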
Example #16
def PSingle (refLamb2, lamb2, qflux, qsigma, uflux, usigma, err, nterm=2):
    """  Fit RM, EVPA0 to Q, U flux measurements
    
    Also does error analysis
    Returns an array of fitted parameters, errors for each, and Chi Squared of fit
    refLamb2 = Reference lambda^2 for fit (m^2)
    lamb2    = Array of lambda^2 for fit (m^2)
    qflux    = Array of Q fluxes (Jy) same dim as lamb2
    qsigma   = Array of Q errors (Jy) same dim as lamb2
    uflux    = Array of U fluxes (Jy) same dim as lamb2
    usigma   = Array of U errors (Jy) same dim as lamb2
    err      = Obit error stack
    nterm    = Number of coefficients to fit (1 or 2)
    """
    ################################################################
    #
    nlamb2 = len(lamb2)
    ret = Obit.RMFitSingle(nlamb2, nterm, refLamb2, lamb2, 
                           qflux, qsigma, uflux, usigma, err.me)
    OErr.printErr(err)
    OErr.printErrMsg(err,"Fitting failed")
    return ret
Example #17
def CopyBeamHistory (inIm, outIm, err):
    """
    Copy beam and history from one image to another (beam)

    History may not appear in AIPS header (but is there)
    FITS History is written to "History" table
    * inIm   Input Obit image
    * outIm  Output Obit image (beam)
    * err    Obit Error/message object
    """
    # Copy Beam
    din  = inIm.Desc.Dict
    dout = outIm.Desc.Dict
    dout['beamMaj'] = din['beamMaj']
    dout['beamMin'] = din['beamMin']
    dout['beamPA']  = din['beamPA']
    outIm.Desc.Dict = dout
    outIm.UpdateDesc(err)
    # Copy History
    inHis  = History.History("in",inIm.List,err)
    outHis = History.History("out",outIm.List,err)
    History.PCopy(inHis, outHis,err)
    outIm.UpdateDesc(err)
    OErr.printErr(err)
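
A usage sketch with hypothetical FITS files on FITS disk 0, using Image.newPFImage in the same way as the NVSSPtFlux example above.

err = OErr.OErr()
inIm  = Image.newPFImage("in",  "map.fits",  0, True, err)
outIm = Image.newPFImage("out", "beam.fits", 0, True, err)
CopyBeamHistory(inIm, outIm, err)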
Example #18
def ConvertKATData(outUV, katdata, meta, err):
    """
    Read KAT HDF data and write Obit UV

     * outUV    = Obit UV object
     * katdata  = input KAT dataset
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    reffreq = meta["spw"][0][1]  # reference frequency
    lamb = 2.997924562e8 / reffreq  # wavelength of reference freq
    nchan = meta["spw"][0][0]  # number of channels
    nif = len(meta["spw"])  # Number of IFs
    nstok = meta["nstokes"]  # Number of Stokes products
    p = meta["products"]  # baseline stokes indices
    nprod = len(p)  # number of correlations/baselines
    # work out Start time in unix sec
    tm = katdata.timestamps[1:2]
    tx = time.gmtime(tm[0])
    time0 = tm[0] - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]

    # Set data to read one vis per IO
    outUV.List.set("nVisPIO", 1)

    # Open data
    zz = outUV.Open(UV.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening output UV")
    # visibility record offsets
    d = outUV.Desc.Dict
    ilocu = d['ilocu']
    ilocv = d['ilocv']
    ilocw = d['ilocw']
    iloct = d['iloct']
    ilocb = d['ilocb']
    ilocsu = d['ilocsu']
    nrparm = d['nrparm']
    jlocc = d['jlocc']
    jlocs = d['jlocs']
    jlocf = d['jlocf']
    jlocif = d['jlocif']
    naxes = d['inaxes']
    count = 0.0
    visno = 0
    # Get IO buffers as numpy arrays
    shape = len(outUV.VisBuf) // 4
    buffer = numarray.array(sequence=outUV.VisBuf,
                            type=numarray.Float32,
                            shape=shape)

    # Template vis
    vis = outUV.ReadVis(err, firstVis=1)
    first = True
    firstVis = 1
    numflags = 0
    numvis = 0
    # Do we need to stop Fringes
    try:
        autodelay = [int(ad) for ad in katdata.sensor['DBE/auto-delay']]
        autodelay = all(autodelay)
    except:
        autodelay = False
    if not autodelay:
        msg = "W term in UVW coordinates will be used to stop the fringes."
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print(msg)
    for scan, state, target in katdata.scans():
        # Fetch data
        tm = katdata.timestamps[:]
        nint = len(tm)
        vs = katdata.vis[:]
        wt = katdata.weights()[:]
        fg = katdata.flags()[:]
        #Get target suid
        # Only on targets in the input list
        try:
            suid = meta["targLookup"][target.name[0:16]]
        except:
            continue
        # Negate the weights that are online flagged (ie. apply the online flags here)
        wt = numpy.where(fg, -wt, wt)
        numflags += numpy.sum(fg)
        numvis += fg.size
        uu = katdata.u
        vv = katdata.v
        ww = katdata.w
        # Number of integrations
        msg = "Scan:%4d Int: %4d %16s Start %s" % (
            scan, nint, target.name, day2dhms((tm[0] - time0) / 86400.0)[0:12])
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print(msg)
        # Loop over integrations
        for iint in range(0, nint):
            # loop over data products/baselines
            for iprod in range(0, nprod):
                thisvis = vs[iint:iint + 1, :, iprod:iprod + 1]
                thisw = ww[iint:iint + 1, iprod]
                # Fringe stop the data if necessary
                if not autodelay:
                    thisvis = StopFringes(thisvis[:, :,
                                                  0], katdata.channel_freqs,
                                          thisw, katdata.corr_products[iprod])
                # Copy slices
                indx = nrparm + (p[iprod][2]) * 3
                buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                       3] = thisvis.real.flatten()
                indx += 1
                buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                       3] = thisvis.imag.flatten()
                indx += 1
                buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                       3] = wt[iint:iint + 1, :, iprod:iprod + 1].flatten()
                # Write if Stokes index >= next or the last
                if (iprod == nprod - 1) or (p[iprod][2] >= p[iprod + 1][2]):
                    # Random parameters
                    buffer[ilocu] = uu[iint][iprod] / lamb
                    buffer[ilocv] = vv[iint][iprod] / lamb
                    buffer[ilocw] = ww[iint][iprod] / lamb
                    buffer[iloct] = (tm[iint] -
                                     time0) / 86400.0  # Time in days
                    buffer[ilocb] = p[iprod][0] * 256.0 + p[iprod][1]
                    buffer[ilocsu] = suid
                    outUV.Write(err, firstVis=visno)
                    visno += 1
                    buffer[3] = -3.14159
                    #print visno,buffer[0:5]
                    firstVis = None  # Only once
                    # initialize visibility
                    first = True
        # end loop over integrations
        if err.isErr:
            OErr.printErrMsg(err, "Error writing data")
    # end loop over scan
    if numvis > 0:
        msg = "Applied %s online flags to %s visibilities (%.3f%%)" % (
            numflags, numvis, float(numflags) / float(numvis))
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing data")
Example #19
def KAT2AIPS (katdata, outUV, disk, fitsdisk, err, \
              calInt=1.0, **kwargs):
    """Convert KAT-7 HDF 5 data set to an Obit UV.

    This module requires katdal and katpoint and their dependencies
    contact Ludwig Schwardt <*****@*****.**> for details.

    Parameters
    ----------
    katdata : katdal.DataSet
        input katfile object
    outUV : ??
        Obit UV object, should be a KAT template for the
        appropriate number of IFs and poln.
    disk  : int
        AIPS Disk number
    fitsdisk: int
        FITS Disk number
    err : ??
        Obit error/message stack
    calInt : float
        Calibration interval in min.
    targets : list, optional
        List of target names to extract from the file
    """
    ################################################################
    OErr.PLog(err, OErr.Info, "Converting h5 data to AIPS UV format.")
    OErr.printErr(err)
    print "Converting h5 data to AIPS UV format.\n"

    # Extract metadata
    meta = GetKATMeta(katdata, err)

    # Extract AIPS parameters of the uv data to the metadata
    meta["Aproject"] = outUV.Aname
    meta["Aclass"] = outUV.Aclass
    meta["Aseq"] = outUV.Aseq
    meta["Adisk"] = disk
    meta["calInt"] = calInt
    meta["fitsdisk"] = fitsdisk
    # Update descriptor
    UpdateDescriptor(outUV, meta, err)
    # Write AN table
    WriteANTable(outUV, meta, err)
    # Write FQ table
    WriteFQTable(outUV, meta, err)
    # Write SU table
    WriteSUTable(outUV, meta, err)

    # Convert data
    ConvertKATData(outUV, katdata, meta, err)

    # Index data
    OErr.PLog(err, OErr.Info, "Indexing data")
    OErr.printErr(err)
    UV.PUtilIndex(outUV, err)

    # Open/close UV to update header
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # initial CL table
    OErr.PLog(err, OErr.Info, "Create Initial CL table")
    OErr.printErr(err)
    print "Create Initial CL table\n"
    UV.PTableCLGetDummy(outUV, outUV, 1, err, solInt=calInt)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # History
    outHistory = History.History("outhistory", outUV.List, err)
    outHistory.Open(History.READWRITE, err)
    outHistory.TimeStamp("Convert KAT7 HDF 5 data to Obit", err)
    outHistory.WriteRec(-1, "datafile = " + katdata.name, err)
    outHistory.WriteRec(-1, "calInt   = " + str(calInt), err)
    outHistory.Close(err)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")
    # Return the metadata for the pipeline
    return meta
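
A hedged sketch of how this might be driven: katdal.open() is the usual way to obtain the katdata object, while 'templateUV', the disk numbers and the file name are placeholders that depend on the local setup.

import katdal
err = OErr.OErr()
katdata = katdal.open("1234567890.h5")
meta = KAT2AIPS(katdata, templateUV, 1, 0, err, calInt=1.0)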
Example #20
def pipeline(aipsSetup, parmFile):
    """
    VLBA Continuum pipeline.
    
    * *aipsSetup* = AIPS setup file
    * *parmFile* = pipeline input parameters file
    """
    ############################# Initialize OBIT ##########################################
    noScrat = []
    exec(open(aipsSetup).read())
    VLBAAddOutFile(aipsSetup, 'project', "Obit's AIPS setup file")

    ############################# Default parameters ##########################################
    # Define data
    project = "Unspecified"  # Project name (12 char or less, used as AIPS Name)
    session = "?"  # Project session code
    band = "?"  # Observing band
    logFile = project + "_" + session + "_" + band + ".log"  # Processing log file
    seq = 1  # AIPS sequence number
    gain = 0.10  # CLEAN loop gain
    doLoadIDI = True  # Load data from IDI FITS?
    doLoadUVF = False  # Load the "AIPS Friendly" (uvfits) FITS  version
    dataInUVF = None  # Input uvfits data file name
    dataInIDI = None  # Input FITS-IDI file or list
    dataClass = "Raw"  # AIPS class of raw uv data
    Compress = False  # Use compressed UV data?
    calInt = 0.15  # Calibration table interval in min.
    wtThresh = 0.8  # Data weight  threshold
    check = False  # Only check script, don't execute tasks
    debug = False  # run tasks debug
    prtLv = 2  # Print level

    # Initialize parameters
    parms = VLBAInitContParms()

    ############################# Set Project Processing parameters ##################
    exec(open(parmFile).read())
    VLBAAddOutFile(parmFile, 'project', 'Pipeline input parameters')

    ################################## Process #####################################
    # Init cal pickle jars
    goodCalPicklefile = project + "_" + session + "_" + band + "_GoodCal.pickle"  # Where results saved
    # Default "best" calibration
    goodCal = {
        "Source": "  ",
        "souID": 0,
        "timeRange": (0.0, 100.0),
        "Fract": 0.0,
        "SNR": 0.0,
        "bestRef": 0
    }
    # Save initial default if it doesn't already exist
    SaveObject(goodCal, goodCalPicklefile, False)
    VLBAAddOutFile(goodCalPicklefile, 'project', 'Best calibrator information')

    OKCalPicklefile = project + "_" + session + "_" + band + "_OKCal.pickle"  # Where results saved
    SaveObject(parms["contCals"], OKCalPicklefile, False)
    VLBAAddOutFile(OKCalPicklefile, 'project', 'List of calibrators')

    targetsPicklefile = project + "_" + session + "_" + band + "_targets.pickle"  # Where results saved
    if (not parms["targets"]) and os.path.exists(targetsPicklefile):
        parms["targets"] = FetchObject(targetsPicklefile)
    else:
        SaveObject(parms["targets"], targetsPicklefile, False)
    VLBAAddOutFile(targetsPicklefile, 'project', 'List of targets')

    # Load the outputs pickle jar
    VLBAFetchOutFiles()

    # Logging directly to logFile
    OErr.PInit(err, prtLv, logFile)
    retCode = 0
    VLBAAddOutFile(logFile, 'project', 'Pipeline log file')

    mess = "Start project " + project + " session " + session + " " + band + " Band" + " AIPS user no. " + str(
        AIPS.userno)
    printMess(mess, logFile)
    logger.info(mess)
    if debug:
        pydoc.ttypager = pydoc.plainpager  # don't page task input displays
        mess = "Using Debug mode "
        printMess(mess, logFile)
    if check:
        mess = "Only checking script"
        printMess(mess, logFile)

    # Load Data from FITS
    uv = None  # Raw data
    uvc = None  # Cal/averaged data
    if doLoadIDI:
        logger.info("--> Load IDI data file (doLoadIDI)")
        if type(dataInIDI) == list:
            logger.info(
                "--> Loading a list of IDI files (dataInIDI is a list)\n" +
                "    This data appears to be from the old correlator.\n" +
                "    IDI corrections will be applied when loading is\n" +
                "    complete.")
            # Read list
            for dataIn in dataInIDI:
                uv = VLBAIDILoad(dataIn, project, session, band, dataClass, disk, seq, err, logfile=logFile, \
                                     wtThresh=wtThresh, calInt=calInt, Compress=Compress, \
                                     check=check, debug=debug)
                if not UV.PIsA(uv):
                    raise RuntimeError("Cannot load " + dataIn)
            # Fix IDI files:
            uv = IDIFix.IDIFix(uv, err)
            seq = uv.Aseq
        else:
            logger.info(
                "--> Loading a single IDI file (dataInIDI is not a list)\n" +
                "    This data appears to be from the DiFX correlator.\n" +
                "    No IDI corrections will be applied.")
            # Single IDI file
            uv = VLBAIDILoad(dataInIDI, project, session, band, dataClass, disk, seq, err, logfile=logFile, \
                                 wtThresh=wtThresh, calInt=calInt, Compress=Compress, \
                                 check=check, debug=debug)
            if not UV.PIsA(uv):
                raise RuntimeError("Cannot load " + dataInIDI)
    if doLoadUVF:
        logger.info("--> Load UVFITS data file (doLoadUVF)")
        uv = VLBAIDILoad(dataInUVF, project, session, band, dataClass, disk, seq, err, logfile=logFile, \
                             wtThresh=wtThresh, calInt=calInt, Compress=Compress, \
                             check=check, debug=debug)
        # Adding check condition to avoid error when checking
        if not UV.PIsA(uv) and not check:
            raise RuntimeError("Cannot load " + dataInUVF)
    # Otherwise set uv
    if uv == None and not check:
        Aname = VLBAAIPSName(project, session)
        uvname = project + "_" + session + "_" + band
        uv = UV.newPAUV(uvname, Aname, dataClass, disk, seq, True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating AIPS data")

    # frequency dependent default parameters
    VLBAInitContFQParms(uv, parms, err, \
                            logfile=logFile, check=check, debug=debug)

    # Setup target source list
    if os.path.exists(targetsPicklefile):
        parms["targets"] = \
            setupSourceList( parms["targets"],  uv, err, logFile, check, debug )
        logger.debug("parms['targets'] = " + str(parms["targets"]))

    # Clear any old calibration/editing
    if parms["doClearTab"]:
        logger.info("--> Clear old calibration/editing (doClearTab)")
        VLBAClearCal(uv, err, doGain=parms["doGain"], doFlag=parms["doFlag"], doBP=parms["doBP"], \
                         check=check, logfile=logFile)
        OErr.printErrMsg(err, "Error resetting calibration")

    # Copy FG 1 to FG 2
    if parms["doCopyFG"]:
        logger.info("--> Copy flag (FG) table 1 to flag table 2 (doCopyFG)")
        retCode = VLBACopyFG(uv,
                             err,
                             logfile=logFile,
                             check=check,
                             debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Copying FG table")

    # Special editing
    if parms["doEditList"] and not check:
        logger.info("--> Special editing (doEditList)")
        for edt in parms["editList"]:
            UV.PFlag(uv,err,timeRange=[dhms2day(edt["timer"][0]),dhms2day(edt["timer"][1])], \
                         flagVer=editFG, Ants=edt["Ant"], Chans=edt["Chans"], IFs=edt["IFs"], \
                         Stokes=edt["Stokes"], Reason=edt["Reason"])
            OErr.printErrMsg(err, "Error Flagging")

    # Quack to remove data from start and end of each scan
    if parms["doQuack"]:
        logger.info("--> Remove data from start and end of scans (doQuack)")
        retCode = VLBAQuack (uv, err, \
                                 begDrop=parms["quackBegDrop"], endDrop=parms["quackEndDrop"], \
                                 Reason=parms["quackReason"], \
                                 logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Quacking data")

    # Median window time editing, for RFI impulsive in time or dropouts
    if parms["doMedn"]:
        logger.info("--> Median window time editing (doMedn)")
        retCode = VLBAMedianFlag (uv, "    ", err, noScrat=noScrat, nThreads=nThreads, \
                                  avgTime=parms["mednAvgTime"], avgFreq=parms["mednAvgFreq"], \
                                  chAvg=parms["mednChAvg"], timeWind=parms["mednTimeWind"], \
                                  flagSig=parms["mednSigma"], flagVer=2, \
                                  logfile=logFile, check=check, debug=False)
        if retCode != 0:
            raise RuntimeError("Error in MednFlag")

    # Quantization correction?
    if parms["doQuantCor"]:
        logger.info("--> Quantization correction (doQuantCor)")
        plotFile = project + "_" + session + "_" + band + ".Quant.ps"
        retCode = VLBAQuantCor(uv, parms["QuantSmo"], parms["QuantFlag"], err, \
                                   doSNPlot=parms["doSNPlot"], plotFile=plotFile, \
                                   logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in quantization correcting/flagging")

    # Parallactic angle correction?
    if parms["doPACor"]:
        logger.info("--> Parallactic angle correction (doPACor)")
        retCode = VLBAPACor(uv, err, noScrat=noScrat, \
                                logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in quantization correcting/flagging")

    # Opacity/Tsys/gain correction
    if parms["doOpacCor"]:
        logger.info("--> Opacity/Tsys/Gain correction (doOpacCor)")
        plotFile = project + "_" + session + "_" + band + ".Opacity.ps"
        retCode = VLBAOpacCor(uv, parms["OpacSmoo"], err,  \
                                  doSNPlot=parms["doSNPlot"], plotFile=plotFile, \
                                  logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in opacity/gain.Tsys correction")

        VLBASaveOutFiles()  # Save plot file in Outfiles

    # Need to determine a list of calibrators?
    if (parms["contCals"] == None) or (len(parms["contCals"]) <= 0):
        logger.info("--> Get list of calibrators (contCals = None or [])")
        if parms["doFindOK"]:
            slist = VLBAAllSource(uv,
                                  err,
                                  logfile=logFile,
                                  check=check,
                                  debug=debug)
            parms["contCals"] = VLBAOKCal(uv, parms["minOKFract"], err, \
                                              solInt=parms["findSolInt"],  \
                                              calSou=slist, minSNR=parms["minOKSNR"], \
                                              doCalib=-1, flagVer=2, refAnts=parms["refAnts"], \
                                              noScrat=noScrat, nThreads=nThreads, \
                                              logfile=logFile, check=check, debug=debug)
            if not parms["contCals"] and not check:
                raise RuntimeError("Error in finding acceptable calibrators")
            logger.info("Calibrators = " + str(parms["contCals"]))
        else:
            # Snatch from pickle jar
            parms["contCals"] = FetchObject(OKCalPicklefile)

    # Save contCals to a pickle jar
    SaveObject(parms["contCals"], OKCalPicklefile, True)

    # Find best calibration source
    if parms["doFindCal"]:
        logger.info("--> Find best calibration source (doFindCal)")
        goodCal = VLBAGoodCal(uv,  err, \
                                  solInt=parms["findSolInt"], timeInt=parms["findTimeInt"], \
                                  calSou=parms["contCals"], \
                                  #CalModel=parms["contCalModel"], \
                                  doCalib=-1, flagVer=2, refAnts=parms["refAnts"], \
                                  noScrat=noScrat, nThreads=nThreads, \
                                  logfile=logFile, check=check, debug=debug)
        if not goodCal and not check:
            raise RuntimeError("Error in finding best calibration data")
        # Save it to a pickle jar
        SaveObject(goodCal, goodCalPicklefile, True)
    else:
        # Fetch from pickle
        goodCal = FetchObject(goodCalPicklefile)

    # Apply Phase cals from PC table?
    if parms["doPCcor"] and not check:
        logger.info("--> Apply phase cals (doPCcor)")
        plotFile = project + "_" + session + "_" + band + ".PC.ps"
        retCode = VLBAPCcor(uv, err, calSou=goodCal["Source"], \
                            timeRange=goodCal["timeRange"], \
                            doCalib=-1, flagVer=2, solInt=parms["manPCsolInt"], \
                            PCin=1, SNout=0, refAnt=goodCal["bestRef"], \
                            doPCPlot=parms["doPCPlot"], plotFile=plotFile, \
                            noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in PC calibration")
        VLBASaveOutFiles()  # Save plot file in Outfiles

    # manual phase cal
    if parms["doManPCal"] and not check:
        logger.info("--> Manual phase cal (doManPCal)")
        plotFile = project + session + band + ".ManPCal.ps"
        retCode = VLBAManPCal(uv, err, calSou=goodCal["Source"], \
                                  #CalModel=parms["contCalModel"], \
                                  timeRange=goodCal["timeRange"], \
                                  solInt=parms["manPCsolInt"], smoTime=parms["manPCSmoo"],  \
                                  refAnts=[goodCal["bestRef"]], doCalib=2, flagVer=2, \
                                  doManPCalPlot=parms["doManPCalPlot"], \
                                  plotFile=plotFile, noScrat=noScrat, \
                                  nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in manual phase calibration")

    # image cals
    if parms["doImgCal"] and not check:
        logger.info("--> Image calibrators (doImgCal)")
        VLBAImageCals(uv,
                      err,
                      Sources=parms["contCals"],
                      seq=seq,
                      sclass=parms["outCclass"],
                      doCalib=2,
                      flagVer=2,
                      doBand=0,
                      FOV=parms["FOV"],
                      Robust=parms["Robust"],
                      maxPSCLoop=parms["maxPSCLoop"],
                      minFluxPSC=parms["minFluxPSC"],
                      solPInt=parms["solPInt"],
                      solMode=parms["solMode"],
                      maxASCLoop=parms["maxASCLoop"],
                      minFluxASC=parms["minFluxASC"],
                      solAInt=parms["solAInt"],
                      avgPol=parms["avgPol"],
                      avgIF=parms["avgIF"],
                      minSNR=parms["minSNR"],
                      refAnt=goodCal["bestRef"],
                      nThreads=nThreads,
                      noScrat=noScrat,
                      logfile=logFile,
                      check=check,
                      debug=debug)
        # Rewrite OKCal pickle file because calibrators may have been updated
        SaveObject(parms["contCals"], OKCalPicklefile, True)
        if len(parms["contCals"]) <= 0:
            logger.error(
                "No calibration sources have been detected! Stopping pipeline."
            )
            raise RuntimeError("No calibration sources have been detected!")

    # Check if calibrator models now available
    parms["contCalModel"] = VLBAImageModel(parms["contCals"],
                                           parms["outCclass"], disk, seq, err)

    # delay calibration
    if parms["doDelayCal"] and not check:
        logger.info("--> Delay calibration (doDelayCal)")
        plotFile = project + "_" + session + "_" + band + ".DelayCal.ps"
        retCode = VLBADelayCal(uv, err, calSou=parms["contCals"], CalModel=parms["contCalModel"], \
                                   doCalib=2, flagVer=2, doBand=0, \
                                   solInt=parms["manPCsolInt"], smoTime=parms["delaySmoo"],  \
                                   refAnts=[goodCal["bestRef"]], \
                                   doSNPlot=parms["doSNPlot"], plotFile=plotFile, \
                                   nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in delay calibration")
        VLBASaveOutFiles()  # Save plot file in Outfiles

    # Bandpass calibration if needed
    if parms["doBPCal"] and not check:
        logger.info("--> Bandpass calibration (doBPCal)")
        retCode = VLBABPass(uv, goodCal["Source"], err, CalModel=parms["contCalModel"], \
                                timeRange=goodCal["timeRange"], doCalib=2, flagVer=2, \
                                noScrat=noScrat, solInt1=parms["bpsolint1"], \
                                solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                                BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], BChan2=parms["bpBChan2"], \
                                EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                                doCenter1=parms["bpDoCenter1"], refAnt=goodCal["bestRef"], specIndex=parms["specIndex"], \
                                doAuto = parms["bpdoAuto"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in Bandpass calibration")

    # Plot amplitude and phase vs. frequency
    if parms["doSpecPlot"]:
        logger.info("--> Spectral plotting (doSpecPlot)")
        plotFile = project + '_' + session + '_' + band + ".spec.ps"
        VLBASpecPlot(uv,
                     goodCal,
                     err,
                     doband=1,
                     check=check,
                     plotFile=plotFile,
                     logfile=logFile,
                     debug=debug)
        VLBASaveOutFiles()  # Save plot file in Outfiles

    # Phase calibration using calibrator models
    if parms["doPhaseCal"]:
        logger.info(
            "--> Phase calibration using calibrator models (doPhaseCal)")
        plotFile = project + "_" + session + "_" + band + ".PhaseCal0.ps"
        retCode = VLBAPhaseCal(uv, err, calSou=parms["contCals"], CalModel=parms["contCalModel"], \
                             doCalib=-1, flagVer=0, doBand=-1, \
                             refAnt=goodCal["bestRef"], solInt=parms["solPInt"], \
                             doSNPlot=parms["doSNPlot"], plotFile=plotFile, \
                             nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in phase calibration")
        VLBASaveOutFiles()  # Save plot file in Outfiles
        # Rewrite contCals pickle file because contCals may have been updated
        SaveObject(parms["contCals"], OKCalPicklefile, True)
        if len(parms["contCals"]) <= 0:
            logger.error(
                "No calibrator sources have been detected! Stopping pipeline.")
            raise RuntimeError("No calibrator sources have been detected!")

    # Amplitude calibration
    if parms["doAmpCal"] and not check:
        logger.info("--> Amplitude calibration (doAmpCal)")
        plotFile = project + "_" + session + "_" + band + ".AmpCal.ps"
        retCode = VLBAAmpCal(uv,
                             err,
                             calSou=parms["contCals"],
                             CalModel=parms["contCalModel"],
                             doCalib=2,
                             flagVer=2,
                             doBand=1,
                             minSumCC=parms["minFluxASC"],
                             refAnt=goodCal["bestRef"],
                             solInt=parms["solAInt"],
                             smoTimeA=2.0,
                             smoTimeP=1. / 60.,
                             doSNPlot=parms["doSNPlot"],
                             plotFile=plotFile,
                             nThreads=nThreads,
                             noScrat=noScrat,
                             logfile=logFile,
                             check=check,
                             debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in amplitude calibration")
        VLBASaveOutFiles()  # Save plot file in Outfiles
        # Rewrite contCals pickle file because contCals may have been updated
        SaveObject(parms["contCals"], OKCalPicklefile, True)
        if len(parms["contCals"]) <= 0:
            logger.error(
                "No calibrator sources have been detected! Stopping pipeline.")
            raise RuntimeError("No calibrator sources have been detected!")

    # Calibrate and average  data
    if parms["doCalAvg"]:
        logger.info("--> Calibration and average data (doCalAvg)")
        retCode = VLBACalAvg (uv, parms["avgClass"], seq, parms["CalAvgTime"], err, \
                                  flagVer=2, doCalib=2, gainUse=0, doBand=1, BPVer=1,  \
                                  BIF=parms["CABIF"], EIF=parms["CAEIF"], \
                                  BChan=parms["CABChan"], EChan=parms["CAEChan"], \
                                  chAvg=parms["chAvg"], avgFreq=parms["avgFreq"], Compress=Compress, \
                                  logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in CalAvg")

    # image targets phase only self-cal - NOTE: actually A&P
    if parms["doImgTarget"] and not check:
        logger.info("--> Image targets (doImgTargets)")
        if not uvc:
            # Get calibrated/averaged data
            Aname = VLBAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
        logger.debug("parms['targets'] = " + str(parms["targets"]))
        VLBAImageCals(uv,
                      err,
                      Sources=parms["targets"],
                      seq=seq,
                      sclass=parms["outTclass"],
                      doCalib=2,
                      flagVer=2,
                      doBand=1,
                      FOV=parms["FOV"],
                      Robust=parms["Robust"],
                      maxPSCLoop=parms["maxPSCLoop"],
                      minFluxPSC=parms["minFluxPSC"],
                      solPInt=parms["solPInt"],
                      solMode=parms["solMode"],
                      maxASCLoop=parms["maxASCLoop"],
                      minFluxASC=parms["minFluxASC"],
                      solAInt=parms["solAInt"],
                      avgPol=parms["avgPol"],
                      avgIF=parms["avgIF"],
                      minSNR=parms["minSNR"],
                      refAnt=goodCal["bestRef"],
                      nThreads=nThreads,
                      noScrat=noScrat,
                      logfile=logFile,
                      check=check,
                      debug=debug)
        # Rewrite targets pickle file because targets may have been updated
        SaveObject(parms["targets"], targetsPicklefile, True)
        if len(parms["targets"]) <= 0:
            logger.error(
                "No target sources have been detected! Stopping pipeline.")
            raise RuntimeError("No target sources have been detected!")

    # Phase calibration using target models
    if parms["doPhaseCal"]:
        logger.info("--> Phase calibration using target models (doPhaseCal)")
        if not uvc:
            # Get calibrated/averaged data
            Aname = VLBAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
        parms["targetModel"] = VLBAImageModel(parms["targets"],
                                              parms["outTclass"], disk, seq,
                                              err)
        plotFile = project + "_" + session + "_" + band + ".PhaseCal.ps"
        retCode = VLBAPhaseCal(uvc, err, calSou=parms["targets"], CalModel=parms["targetModel"], \
                             doCalib=-1, flagVer=0, doBand=-1, \
                             refAnt=goodCal["bestRef"], solInt=parms["manPCsolInt"], \
                             doSNPlot=parms["doSNPlot"], plotFile=plotFile, \
                             nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in phase calibration")
        VLBASaveOutFiles()  # Save plot file in Outfiles
        # Rewrite targets pickle file because targets may have been updated
        SaveObject(parms["targets"], targetsPicklefile, True)
        if len(parms["targets"]) <= 0:
            logger.error(
                "No target sources have been detected! Stopping pipeline.")
            raise RuntimeError("No target sources have been detected!")

    # Instrumental polarization calibration
    if parms["doInstPol"]:
        logger.info("--> Instrumental polarization calibration (doInstPol)")
        # calibrators defaults to strong calibrator list
        if not parms["instPolCal"]:
            instPolCal = parms["contCals"]
        else:
            instPolCal = parms["instPolCal"]
        if not uvc:
            # Get calibrated/averaged data
            Aname = VLBAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
        retCode = VLBAPolCal(uvc, instPolCal, err, \
                                 doCalib=2, flagVer=0, doBand=-1, doSetJy=True, \
                                 refAnt=goodCal["bestRef"], solInt=2.0, \
                                 noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in instrumental poln calibration")

    # RL Phase (EVPA) calibration as BP table
    if parms["doRLCal"] and parms["RLCal"]:
        logger.info("--> RL phase calibration (doRLCal)")
        if not uvc:
            # Get calibrated/averaged data
            Aname = VLBAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
        retCode = VLBARLCal2(uvc, err, RLPCal=parms["RLCal"], \
                                doCalib=2, gainUse=2, flagVer=0, doBand=-1, doPol=True,  \
                                refAnt=goodCal["bestRef"], niter=300, FOV=0.02/3600.0, \
                                nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in RL phase calibration")

    # Image targets, possibly with full Stokes I(QU)
    if parms["doImgFullTarget"]:
        logger.info("--> Image targets (doImgFullTarget)")
        if not uvc:
            # Get calibrated/averaged data
            Aname = VLBAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
        VLBAImageTargets(uvc,
                         err,
                         Sources=parms["targets"],
                         seq=seq,
                         sclass=parms["outIclass"],
                         doCalib=2,
                         flagVer=0,
                         doBand=-1,
                         Stokes=parms["Stokes"],
                         FOV=parms["FOV"],
                         Robust=parms["Robust"],
                         maxPSCLoop=2,
                         minFluxPSC=parms["minFluxPSC"],
                         solPInt=parms["solPInt"],
                         solMode="P",
                         maxASCLoop=parms["maxASCLoop"],
                         minFluxASC=parms["minFluxASC"],
                         solAInt=parms["solAInt"],
                         avgPol=parms["avgPol"],
                         avgIF=parms["avgIF"],
                         minSNR=parms["minSNR"],
                         refAnt=goodCal["bestRef"],
                         nTaper=parms["nTaper"],
                         Tapers=parms["Tapers"],
                         do3D=parms["do3D"],
                         nThreads=nThreads,
                         noScrat=noScrat,
                         logfile=logFile,
                         check=check,
                         debug=debug)

    # Save UV data?
    if parms["doSaveUV"] and (not check):
        logger.info("--> Save UV data (doSaveUV)")
        mess = "Write calibrated and averaged UV data to disk"
        printMess(mess, logFile)
        # Get calibrated/averaged data
        if not uvc:
            Aname = VLBAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
        # Write
        filename = project + session + band + "CalAvg.uvtab"
        fuv = VLBAUVFITS(uvc, filename, 0, err, compress=Compress)
        VLBAAddOutFile(filename, 'project', "Calibrated Averaged UV data")
        # Save list of output files
        VLBASaveOutFiles()

    # Save UV data tables?
    if parms["doSaveTab"] and (not check):
        logger.info("--> Save UV data tables (doSaveTab)")
        mess = "Write UV data tables to disk."
        printMess(mess, logFile)
        filename = project + session + band + "CalTab.uvtab"
        fuv = VLBAUVFITSTab(uv, filename, 0, err)
        VLBAAddOutFile(filename, 'project', "Calibrated AIPS tables")
        # Save list of output files
        VLBASaveOutFiles()
    # Imaging results
    outDisk = 0
    if parms["doSaveImg"]:
        logger.info("--> Save images (doSaveImg)")
        mess = "Write images to disk."
        printMess(mess, logFile)
        # How many Stokes images
        nstok = len(parms["Stokes"])
        # Targets
        logger.debug("parms['targets'] = " + str(parms['targets']))
        for target in parms["targets"]:
            if not check:
                # intermediate images
                oclass = parms["outTclass"]
                x = Image.newPAImage("out", target, oclass, disk, seq, True,
                                     err)
                if (not x.exist):
                    print(target, "image not found. Skipping.")
                    continue
                outfile = project + session + band + target + "." + oclass + ".fits"
                mess = "Write Intermediate target " + outfile + " on disk " + str(
                    outDisk)
                VLBAAddOutFile(outfile, target, 'Intermediate target image')
                printMess(mess, logFile)
                xf = VLBAImFITS(x, outfile, outDisk, err, fract=0.1)
                # Save list of output files
                VLBASaveOutFiles()
                # Statistics
                zz = imstat(x, err, logfile=logFile)
                del x, xf
                # Final images
                for istok in range(0, nstok):
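                    # Hypothetical example: with Stokes "IQU" and outIclass "IClean", the
                    # final classes become "IClean", "QClean", "UClean"; the Stokes letter
                    # replaces the first character of the output image class.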
                    oclass = parms["Stokes"][istok:istok +
                                             1] + parms["outIclass"][1:]
                    x = Image.newPAImage("out", target, oclass, disk, seq,
                                         True, err)
                    outfile = project + session + band + target + "." + oclass + ".fits"
                    logger.info("Write " + outfile + " on disk " +
                                str(outDisk))
                    xf = VLBAImFITS(x, outfile, outDisk, err, fract=0.1)
                    VLBAAddOutFile(outfile, target, 'Image')
                    logger.info("Writing file " + outfile)
                    # Statistics
                    zz = imstat(x, err, logfile=logFile)
                    del x, xf
                    # Save list of output files
                    VLBASaveOutFiles()
        # Calibrators
        for target in parms["contCals"]:
            if not check:
                oclass = parms["outCclass"]
                x = Image.newPAImage("out", target, oclass, disk, seq, True,
                                     err)
                if (not x.exist):
                    print(target, "image not found. Skipping.")
                    continue
                outfile = project + session + band + target + "." + oclass + ".fits"
                mess = "Write Calibrator " + outfile + " on disk " + str(
                    outDisk)
                printMess(mess, logFile)
                xf = VLBAImFITS(x, outfile, outDisk, err, fract=0.1)
                VLBAAddOutFile(outfile, target, 'Calibrator Image')
                # Statistics
                zz = imstat(x, err, logfile=logFile)
                del x, xf
                # Save list of output files
                VLBASaveOutFiles()
        # end writing images loop

    # Contour plots
    if parms["doKntrPlots"]:
        logger.info("--> Contour plots (doKntrPlots)")
        VLBAKntrPlots(err,
                      imName=parms["targets"],
                      project=project,
                      session=session,
                      band=band,
                      disk=disk,
                      debug=debug)
        # Save list of output files
        VLBASaveOutFiles()
    elif debug:
        print("Not creating contour plots ( doKntrPlots = ",
              parms["doKntrPlots"], " )")

    # Source uv plane diagnostic plots
    if parms["doDiagPlots"]:
        logger.info("--> Diagnostic plots (doDiagPlots)")
        # Get the highest number avgClass catalog file
        Aname = VLBAAIPSName(project, session)
        uvc = None
        if not check:
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
        VLBADiagPlots( uvc, err, cleanUp=parms["doCleanup"], \
                           project=project, session=session, band=band, \
                           logfile=logFile, check=check, debug=debug )
        # Save list of output files
        VLBASaveOutFiles()
    elif debug:
        print("Not creating diagnostic plots ( doDiagPlots = ",
              parms["doDiagPlots"], " )")

    # Save metadata
    srcMetadata = None
    projMetadata = None
    if parms["doMetadata"]:
        logger.info("--> Save metadata (doMetadata)")
        if not uvc:
            # Get calibrated/averaged data
            Aname = VLBAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")

        # Get source metadata; save to pickle file
        srcMetadata = VLBASrcMetadata(uvc,
                                      err,
                                      Sources=parms["targets"],
                                      seq=seq,
                                      sclass=parms["outIclass"],
                                      Stokes=parms["Stokes"],
                                      logfile=logFile,
                                      check=check,
                                      debug=debug)
        picklefile = project + "_" + session + "_" + band + "SrcReport.pickle"
        SaveObject(srcMetadata, picklefile, True)
        VLBAAddOutFile(picklefile, 'project', 'All source metadata')

        # Get project metadata; save to pickle file
        projMetadata = VLBAProjMetadata(uvc,
                                        AIPS_VERSION,
                                        err,
                                        contCals=parms["contCals"],
                                        goodCal=goodCal,
                                        project=project,
                                        session=session,
                                        band=band,
                                        dataInUVF=dataInUVF,
                                        archFileID=archFileID)
        picklefile = project + "_" + session + "_" + band + "ProjReport.pickle"
        SaveObject(projMetadata, picklefile, True)
        VLBAAddOutFile(picklefile, 'project', 'Project metadata')

    # Write report
    if parms["doHTML"]:
        logger.info("--> Write HTML report (doHTML)")
        VLBAHTMLReport( projMetadata, srcMetadata, \
                            outfile=project+"_"+session+"_"+band+"report.html", \
                            logFile=logFile )

    # Write VOTable
    if parms["doVOTable"]:
        logger.info("--> Write VOTable (doVOTable)")
        VLBAAddOutFile('VOTable.xml', 'project', 'VOTable report')
        VLBAWriteVOTable(projMetadata, srcMetadata, filename='VOTable.xml')

    # Save list of output files
    VLBASaveOutFiles()

    # Cleanup - delete AIPS files
    if parms["doCleanup"] and (not check):
        logger.info("--> Clean up (doCleanup)")
        # Delete target images
        # How many Stokes images
        nstok = len(parms["Stokes"])
        for istok in range(0, nstok):
            oclass = parms["Stokes"][istok:istok + 1] + parms["outIclass"][1:]
            AllDest(err, disk=disk, Aseq=seq, Aclass=oclass)

        # delete Calibrator images
        AllDest(err, disk=disk, Aseq=seq, Aclass=parms["outCclass"])

        # Delete intermediate target images
        AllDest(err, disk=disk, Aseq=seq, Aclass=parms["outTclass"])
        OErr.printErrMsg(err, "Deleting AIPS images")

        # Delete UV data
        uv.Zap(err)
        # Zap calibrated/averaged data
        if not uvc:
            Aname = VLBAAIPSName(project, session)
            uvc = UV.newPAUV("AIPS UV DATA", Aname, parms["avgClass"], disk,
                             seq, True, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
        uvc.Zap(err)
        OErr.printErrMsg(err, "Writing output/cleanup")

    # Shutdown
    mess = "Finished project " + project
    printMess(mess, logFile)
    OErr.printErr(err)
    OSystem.Shutdown(ObitSys)
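
# Illustrative sketch (not part of the original pipeline): the "get calibrated/averaged
# data" block above is repeated before many steps and could be factored into a helper
# like this one, built only from calls that already appear in the pipeline.
def _getCalAvgUV(project, session, band, parms, disk, seq, err):
    """Return the calibrated/averaged AIPS UV data, reporting any error via err."""
    Aname = VLBAAIPSName(project, session)
    uvname = project + "_" + session + "_" + band + "_Cal"
    uvc = UV.newPAUV(uvname, Aname, parms["avgClass"], disk, seq, True, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
    return uvc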
Exemplo n.º 21
0
def ConvertKATData(outUV,
                   katdata,
                   meta,
                   err,
                   static=None,
                   blmask=1.e10,
                   stop_w=False,
                   timeav=1,
                   flag=False,
                   doweight=True,
                   doflags=True):
    """
    Read KAT HDF data and write Obit UV

     * outUV    = Obit UV object
     * katdata  = input KAT dataset
     * meta     = dict with data meta data
     * err      = Python Obit Error/message stack to init
    """
    ################################################################
    reffreq = meta["spw"][0][1]  # reference frequency
    lamb = 2.997924562e8 / reffreq  # wavelength of reference freq
    nchan = meta["spw"][0][0]  # number of channels
    nif = len(meta["spw"])  # Number of IFs
    nstok = meta["nstokes"]  # Number of Stokes products
    newants = meta["newants"]
    p = meta["products"]  # baseline stokes indices
    b = meta["baselines"]
    bi = meta["blineind"]
    nbase = b.shape[0]  # number of correlations/baselines
    nprod = nbase * nstok
    antslookup = meta["antLookup"]
    # work out Start time in unix sec
    tm = katdata.timestamps[0]
    tx = time.gmtime(tm)
    time0 = tm - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]
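    # time0 is the Unix time of 0h UT on the day of the first dump; later the
    # timestamps are converted to AIPS-style fractional days relative to this midnight.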

    # Set data to read one timestamp per IO
    outUV.List.set("nVisPIO", nbase)
    d = outUV.Desc.Dict
    d.update(numVisBuff=nbase)
    outUV.Desc.Dict = d
    # Open data
    zz = outUV.Open(UV.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening output UV")
    # visibility record offsets
    idb = {}
    idb['ilocu'] = d['ilocu']
    idb['ilocv'] = d['ilocv']
    idb['ilocw'] = d['ilocw']
    idb['iloct'] = d['iloct']
    idb['ilocb'] = d['ilocb']
    idb['ilocsu'] = d['ilocsu']
    idb['nrparm'] = d['nrparm']

    visshape = nchan * nstok * 3
    count = 0.0
    # Get IO buffers as numpy arrays
    buff = numpy.frombuffer(outUV.VisBuf, dtype=numpy.float32)
    #Set up a flagger if needs be
    if flag:
        flagger = SumThresholdFlagger(
            outlier_nsigma=4.5,
            freq_chunks=7,
            spike_width_freq=1.5e6 / katdata.channel_width,
            spike_width_time=100. / katdata.dump_period,
            time_extend=3,
            freq_extend=3,
            average_freq=1)
    #Set up the baseline mask
    blmask = get_baseline_mask(newants, katdata.corr_products, blmask)
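    # (Assumed behaviour: get_baseline_mask converts the blmask length threshold into a
    # boolean selection over correlation products; the static flags below are applied
    # only to the selected products.)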

    # Template vis
    vis = outUV.ReadVis(err, firstVis=1)
    first = True
    visno = 1
    numflags = 0
    numvis = 0
    # Do we need to stop Fringes
    if stop_w:
        msg = "W term in UVW coordinates will be used to stop the fringes."
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print(msg)

    # Set up baseline vectors of uvw calculation
    array_centre = katpoint.Antenna('', *newants[0].ref_position_wgs84)
    baseline_vectors = numpy.array(
        [array_centre.baseline_toward(antenna) for antenna in newants])

    max_scan = 151
    QUACK = 1
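    # QUACK is the number of leading (timeav-scaled) dumps dropped at the start of
    # each scan; max_scan limits how many dumps are held in memory per read.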
    # Generate arrays for storage
    scan_vs = numpy.empty((max_scan, nchan, nprod), dtype=katdata.vis.dtype)
    scan_fg = numpy.empty((max_scan, nchan, nprod), dtype=katdata.flags.dtype)
    scan_wt = numpy.empty((max_scan, nchan, nprod),
                          dtype=katdata.weights.dtype)
    for scan, state, target in katdata.scans():
        # Don't read at all if all will be "Quacked"
        if katdata.shape[0] < ((QUACK + 1) * timeav):
            continue
        # Chunk data into max_scan dumps
        if katdata.shape[0] > max_scan:
            scan_slices = [
                slice(i, i + max_scan, 1)
                for i in range(QUACK * timeav, katdata.shape[0], max_scan)
            ]
            scan_slices[-1] = slice(scan_slices[-1].start, katdata.shape[0], 1)
        else:
            scan_slices = [slice(QUACK * timeav, katdata.shape[0])]

        # Number of integrations
        num_ints = katdata.timestamps.shape[0] - QUACK * timeav
        msg = "Scan:%4d Int: %4d %16s Start %s" % (
            scan, num_ints, target.name,
            day2dhms((katdata.timestamps[0] - time0) / 86400.0)[0:12])
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print(msg)
        for sl in scan_slices:
            tm = katdata.timestamps[sl]
            nint = tm.shape[0]
            load(katdata, numpy.s_[sl.start:sl.stop, :, :], scan_vs[:nint],
                 scan_wt[:nint], scan_fg[:nint], err)
            # Make sure we've reset the weights
            wt = scan_wt[:nint]
            if not doweight:
                wt[:] = 1.
            vs = scan_vs[:nint]
            fg = scan_fg[:nint]
            if not doflags:
                fg[:] = False
            if static is not None:
                fg[:, :, blmask] |= static[numpy.newaxis, :, numpy.newaxis]
            if flag:
                fg |= flag_data(vs, fg, flagger)
            if timeav > 1:
                vs, wt, fg, tm, _ = averager.average_visibilities(
                    vs,
                    wt,
                    fg,
                    tm,
                    katdata.channel_freqs,
                    timeav=int(timeav),
                    chanav=1)
                # Update number of integrations for averaged data.
                nint = tm.shape[0]
            # Get target suid
            # Only on targets in the input list
            try:
                suid = meta["targLookup"][target.name[0:16]]
            except KeyError:
                continue

            numflags += numpy.sum(fg)
            numvis += fg.size

            # uvw calculation
            uvw_coordinates = get_uvw_coordinates(array_centre,
                                                  baseline_vectors, tm, target,
                                                  bi)

            # Convert to aipsish
            uvw_coordinates /= lamb

            # Convert to AIPS time
            tm = (tm - time0) / 86400.0

            #Get random parameters for this scan
            rp = get_random_parameters(idb, b, uvw_coordinates, tm, suid)
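            # rp[iint] is assumed to hold the AIPS random parameters (u, v, w, time,
            # baseline and source id, at the offsets recorded in idb) for every
            # baseline of integration iint.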
            # Loop over integrations
            for iint in range(0, nint):
                # Fill the buffer for this integration
                buff = fill_buffer(vs[iint], fg[iint], wt[iint], rp[iint], p,
                                   bi, buff)
                # Write to disk
                outUV.Write(err, firstVis=visno)
                visno += nbase
                firstVis = None
            # end loop over integrations
            if err.isErr:
                OErr.printErrMsg(err, "Error writing data")
    # end loop over scan
    if numvis > 0:
        msg = "Applied %s online flags to %s visibilities (%.3f%%)" % (
            numflags, numvis, (float(numflags) / float(numvis) * 100.))
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing data")
Exemplo n.º 22
0
def MKContPipeline(files, outputdir, **kwargs):
    """MeerKAT Continuum pipeline.

    Parameters
    ----------
    files : list
        h5 filenames (note: support for multiple h5 files 
        i.e. ConcatenatedDataSet is not currently supported)
    outputdir : string
        Directory location to write output data, 
    scratchdir : string, optional
        The directory location of the aips disk
    parmFile : string, optional
        Overwrite the default imaging parameters using this parameter file.
    """
    #if len(files) > 1:
    #    raise TooManyKatfilesException('Processing multiple katfiles are not currently supported')
    h5file = files

    # Die gracefully if we cannot write to the output area...
    if not os.path.exists(outputdir):
        print('Specified output directory: ' + outputdir + ' does not exist.')
        exit(-1)

    # Obit error logging
    err = OErr.OErr()

    #################### Initialize filenames #######################################################
    fileRoot      = os.path.join(outputdir, os.path.basename(os.path.splitext(files[0])[0])) # root of file name
    logFile       = fileRoot+".log"   # Processing log file
    avgClass      = ("UVAv")[0:6]  # Averaged data AIPS class
    manifestfile  = outputdir + '/manifest.pickle'

    ############################# Initialize OBIT and AIPS ##########################################
    noScrat     = []
    # Logging directly to logFile
    OErr.PInit(err, 2, logFile)
    EVLAAddOutFile(os.path.basename(logFile), 'project', 'Pipeline log file')

    ObitSys = AIPSSetup.AIPSSetup(err,configfile=kwargs.get('configFile'),scratchdir=kwargs.get('scratchdir'))

    # Get the set up AIPS environment.
    AIPS_ROOT    = os.environ['AIPS_ROOT']
    AIPS_VERSION = os.environ['AIPS_VERSION']

    nThreads = 24
    user = OSystem.PGetAIPSuser()
    AIPS.userno = user
    disk = 1
    fitsdisk = 0
    nam = os.path.basename(os.path.splitext(files[0])[0])[0:10]
    cls = "Raw"
    seq = 1

    ############################# Initialise Parameters ##########################################
    ####### Initialize parameters dictionary ##### 
    parms = KATInitContParms()
    ####### User defined parameters ######
    if kwargs.get('parmFile'):
        print("parmFile",kwargs.get('parmFile'))
        exec(open(kwargs.get('parmFile')).read())
        EVLAAddOutFile(os.path.basename(kwargs.get('parmFile')), 'project', 'Pipeline input parameters' )

    ############### Initialize katfile object, uvfits object and condition data #########################
    OK = False
    # Open the h5 file as a katfile object
    try:
        #open katfile and perform selection according to kwargs
        katdata = katfile.open(h5file)
        OK = True
    except Exception as exception:
        print(exception)
    if not OK:
        OErr.PSet(err)
        OErr.PLog(err, OErr.Fatal, "Unable to read KAT HDF5 data in " + str(h5file))
        raise KATUnimageableError("Unable to read KAT HDF5 data in " + str(h5file))

    # We have a katdal object - read external flags and add them in if available
    if kwargs.get('flags') is not None:
        flags = kwargs.get('flags')
        fa = flags.split(',')
        for fn, ff in enumerate(fa):
            ex_flags_file = h5py.File(ff)
            ex_flags = da.from_array(ex_flags_file['flags'], chunks=(1, 342, katdata.shape[2]))
            # Replace the online flags with the external ones
            katdata.datasets[fn].source.data.flags = ex_flags

    #Are we MeerKAT or KAT-7
    telescope = katdata.ants[0].name[0]
    if telescope=='m':
        sefd=500.
    else:
        sefd=1200.
    #Get calibrator models
    fluxcals = katpoint.Catalogue(open(FITSDir.FITSdisks[0]+"/"+parms["fluxModel"]))
    #Condition data (get bpcals, update names for aips conventions etc)
    KATh5Condition(katdata,fluxcals,err)

    ###################### Data selection and static edits ############################################
    # Select data based on static imageable parameters
    MKATh5Select(katdata, parms, err, **kwargs)

    ####################### Import data into AIPS #####################################################
    # Construct a template uvfits file from master template
    mastertemplate=ObitTalkUtil.FITSDir.FITSdisks[fitsdisk]+'MKATTemplate.uvtab.gz'
    outtemplate=nam+'.uvtemp'
    KATH5toAIPS.MakeTemplate(mastertemplate,outtemplate,len(katdata.channel_freqs))
    uv=OTObit.uvlod(outtemplate,0,nam,cls,disk,seq,err)

    obsdata = KATH5toAIPS.KAT2AIPS(katdata, uv, disk, fitsdisk, err, calInt=1.0, **kwargs)
    MakeIFs.UVMakeIF(uv,8,err)

    # Print the uv data header to screen.
    uv.Header(err)
    os.remove(outtemplate)
    ############################# Set Project Processing parameters ###################################
    # Parameters derived from obsdata and katdata
    MKATGetObsParms(obsdata, katdata, parms, logFile)

    ###### Initialise target parameters #####
    KATInitTargParms(katdata,parms,err)

    # General AIPS data parameters at script level
    dataClass = ("UVDa")[0:6]      # AIPS class of raw uv data
    band      = katdata.spectral_windows[0].product #Correlator product
    project   = parms["project"][0:12]  # Project name (12 char or less, used as AIPS Name)
    outIClass = parms["outIClass"] # image AIPS class
    debug     = parms["debug"]
    check     = parms["check"]

    # Load the outputs pickle jar
    EVLAFetchOutFiles()

    OSystem.PAllowThreads(nThreads)   # Allow threads in Obit/python
    retCode = 0

    maxgap = max(parms["CalAvgTime"],20*katdata.dump_period)/60.
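    # maxgap is presumably the largest time gap, in minutes, tolerated when plotting
    # spectra (it is passed to EVLASpectrum below); the expression assumes CalAvgTime
    # and dump_period are both in seconds.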
    ################### Start processing ###############################################################

    mess = "Start project "+parms["project"]+" AIPS user no. "+str(AIPS.userno)+\
           ", KAT7 configuration "+parms["KAT7Cfg"]
    printMess(mess, logFile)
    if debug:
        pydoc.ttypager = pydoc.plainpager # don't page task input displays
        mess = "Using Debug mode "
        printMess(mess, logFile)
    if check:
        mess = "Only checking script"
        printMess(mess, logFile)

    # Log parameters
    printMess("Parameter settings", logFile)
    for p in parms:
        mess = "  "+p+": "+str(parms[p])
        printMess(mess, logFile)
    clist = []
    for DCal in parms["DCals"]:
        if DCal["Source"] not in clist:
            clist.append(DCal["Source"])
    for PCal in parms["PCals"]:
        if PCal["Source"] not in clist:
            clist.append(PCal["Source"])
    for ACal in parms["ACals"]:
        if ACal["Source"] not in clist:
            clist.append(ACal["Source"])
    if kwargs.get('targets') is not None:
        targets = [targ.name for targ in katdata.catalogue if (targ.name not in clist) and (targ.name in kwargs.get('targets').split(','))]
    else:
        targets = [targ.name for targ in katdata.catalogue if (targ.name not in clist)]
    refAnt = FetchObject(fileRoot+".refAnt.pickle")

    # Save parameters to pickle jar, manifest
    ParmsPicklefile = fileRoot+".Parms.pickle"   # Where results saved
    SaveObject(parms, ParmsPicklefile, True)
    EVLAAddOutFile(os.path.basename(ParmsPicklefile), 'project', 'Processing parameters used' )
    loadClass = dataClass

    retCode = KATCalAvg (uv, "PREAVG", parms["seq"], parms["CalAvgTime"], err, \
                              flagVer=-1, doCalib=-1, gainUse=-1, doBand=-1, BPVer=-1, doPol=False, \
                              avgFreq=0, chAvg=1, BChan=1, EChan=0, doAuto=parms["doAuto"], Stokes=' ',\
                              BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
                              nThreads=nThreads, logfile=logFile, check=check, debug=debug)
    if retCode!=0:
        raise  RuntimeError("Error in CalAvg")
    uv.Zap(err)
    # Get initially averaged data
    if not check:
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), "PREAVG", \
                        disk, parms["seq"], True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating initial avg AIPS data")

    # Hanning
    parms["doHann"]=True
    if parms["doHann"]:
       # Set uv if not done
        if uv is None and not check:
            uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), loadClass[0:6], disk, parms["seq"], True, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating AIPS data")

        uv = KATHann(uv, EVLAAIPSName(project), dataClass, disk, parms["seq"], err, \
                      doDescm=parms["doDescm"], flagVer=0, logfile=logFile, check=check, debug=debug)
        #Halve channels after hanning.
        parms["selChan"]=int(parms["selChan"]/2)
        parms["BChDrop"]=int(parms["BChDrop"]/2)
        parms["EChDrop"]=int(parms["EChDrop"]/2)
        if uv is None and not check:
            raise RuntimeError("Cannot Hann data ")
 
    # Clear any old calibration/editing 
    if parms["doClearTab"]:
        mess =  "Clear previous calibration"
        printMess(mess, logFile)
        EVLAClearCal(uv, err, doGain=parms["doClearGain"], doFlag=parms["doClearFlag"], doBP=parms["doClearBP"], check=check)
        OErr.printErrMsg(err, "Error resetting calibration")

    # Quack to remove data from start and end of each scan
    if parms["doQuack"]:
        retCode = EVLAQuack (uv, err, begDrop=parms["quackBegDrop"], endDrop=parms["quackEndDrop"], \
                             Reason=parms["quackReason"], \
                             logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error Quacking data")
    
    # Flag antennas shadowed by others?
    if parms["doShad"]:
        retCode = EVLAShadow (uv, err, shadBl=parms["shadBl"], \
                              logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error Shadow flagging data")
    

    # Median window time editing, for RFI impulsive in time
    if parms["doMednTD1"]:
        mess =  "Median window time editing, for RFI impulsive in time:"
        printMess(mess, logFile)
        retCode = EVLAMedianFlag (uv, clist, err, noScrat=noScrat, nThreads=nThreads, \
                                  avgTime=parms["mednAvgTime"], avgFreq=parms["mednAvgFreq"],  chAvg= parms["mednChAvg"], \
                                  timeWind=parms["mednTimeWind"],flagVer=2, flagTab=2,flagSig=parms["mednSigma"], \
                                  logfile=logFile, check=check, debug=False)
        if retCode!=0:
            raise RuntimeError("Error in MednFlag")
    
    # Median window frequency editing, for RFI impulsive in frequency
    if parms["doFD1"]:
        mess =  "Median window frequency editing, for RFI impulsive in frequency:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, clist, err, flagVer=2, flagTab=2, doCalib=-1, doBand=-1,   \
                                timeAvg=parms["FD1TimeAvg"], \
                                doFD=True, FDmaxAmp=1.0e20, FDmaxV=1.0e20, FDwidMW=parms["FD1widMW"],  \
                                FDmaxRMS=[1.0e20,0.1], FDmaxRes=parms["FD1maxRes"],  \
                                FDmaxResBL= parms["FD1maxRes"],  FDbaseSel=parms["FD1baseSel"],\
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
           raise  RuntimeError("Error in AutoFlag")
    
    # Parallactic angle correction?
    if parms["doPACor"]:
        retCode = EVLAPACor(uv, err, \
                                logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in Parallactic angle correction")
    
    # Need to find a reference antenna?  See if we have saved it?
    if (parms["refAnt"]<=0):
        refAnt = FetchObject(fileRoot+".refAnt.pickle")
        if refAnt:
            parms["refAnt"] = refAnt

    # Use bandpass calibrator and center half of each spectrum
    if parms["refAnt"]<=0:
        mess = "Find best reference antenna: run Calib on BP Cal(s) "
        printMess(mess, logFile)
        parms["refAnt"] = EVLAGetRefAnt(uv, parms["BPCals"], err, flagVer=0, \
                                        solInt=parms["bpsolint1"], nThreads=nThreads, \
                                        logfile=logFile, check=check, debug=debug)
        if err.isErr:
            raise RuntimeError("Error finding reference antenna")
        if parms["refAnts"][0]<=0:
            parms["refAnts"][0] = parms["refAnt"]
        mess = "Picked reference antenna "+str(parms["refAnt"])
        printMess(mess, logFile)
        # Save it
        ParmsPicklefile = fileRoot+".Parms.pickle"   # Where results saved
        SaveObject(parms, ParmsPicklefile, True)
        refAntPicklefile = fileRoot+".refAnt.pickle"   # Where results saved
        SaveObject(parms["refAnt"], refAntPicklefile, True)


    # Plot Raw, edited data?
    parms["doRawSpecPlot"]=False
    parms["doSpecPlot"]=False
    if parms["doRawSpecPlot"] and parms["plotSource"]:
        mess =  "Raw Spectral plot for: "+' '.join(parms["BPCal"])
        printMess(mess, logFile)
        plotFile = fileRoot+"_RawSpec.ps"
        retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                               Stokes=["RR","LL"], doband=-1,          \
                               check=check, debug=debug, logfile=logFile )
        if retCode!=0:
            raise  RuntimeError("Error in Plotting spectrum")
        EVLAAddOutFile(plotFile, 'project', 'Pipeline log file' )

    # delay calibration
    if parms["doDelayCal"] and parms["DCals"] and not check:
        plotFile = fileRoot+"_DelayCal.ps"
        retCode = EVLADelayCal(uv, parms["DCals"], err,  \
                               BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                               doCalib=2, flagVer=0, doBand=-1, \
                               solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"],  \
                               refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], 
                               doZeroPhs=parms["delayZeroPhs"], \
                               doPlot=parms["doSNPlot"], plotFile=plotFile, \
                               nThreads=nThreads, noScrat=noScrat, \
                               logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in delay calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and parms["plotSource"]:
            plotFile = fileRoot+"_DelaySpec.ps"
            retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \
                                   plotFile, parms["refAnt"], err, \
                                   Stokes=["RR","LL"], doband=-1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode!=0:
                raise  RuntimeError("Error in Plotting spectrum")
	print(parms["bpBChan1"],parms["bpEChan1"],parms["bpBChan2"],parms["bpEChan2"],parms["bpChWid2"])
    # Bandpass calibration
    if parms["doBPCal"] and parms["BPCals"]:
        retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                            solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                            BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                            BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                            doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                            UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \
                            nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in Bandpass calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and  parms["plotSource"]:
            plotFile = fileRoot+"_BPSpec.ps"
            retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, \
                                   parms["refAnt"], err, Stokes=["RR","LL"], doband=1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode!=0:
                raise  RuntimeError("Error in Plotting spectrum")

    # Amp & phase Calibrate
    if parms["doAmpPhaseCal"]:
        plotFile = fileRoot+"_APCal.ps"
        retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], 
                             doCalib=2, doBand=1, BPVer=1, flagVer=0, \
                             BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                             solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                             doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \
                             ampEditFG=parms["ampEditFG"], \
                             doPlot=parms["doSNPlot"], plotFile=plotFile,  refAnt=parms["refAnt"], \
                             nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        #print parms["ACals"],parms["PCals"]
        if retCode!=0:
            raise RuntimeError("Error calibrating")

    # More editing

    if parms["doAutoFlag"]:
        mess =  "Post calibration editing:"
        printMess(mess, logFile)
        # if going to redo then only calibrators
        if parms["doRecal"]:
            # Only calibrators
            clist = []
            for DCal in parms["DCals"]:
                if DCal["Source"] not in clist:
                    clist.append(DCal["Source"])
            for PCal in parms["PCals"]:
                if PCal["Source"] not in clist:
                    clist.append(PCal["Source"])
            for ACal in parms["ACals"]:
                if ACal["Source"] not in clist:
                    clist.append(ACal["Source"])
        else:
            clist = []

        retCode = EVLAAutoFlag (uv, clist, err, flagVer=0, flagTab =2, \
                                doCalib=2, gainUse=0, doBand=1, BPVer=1,  \
                                IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                doFD=parms["doAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                FDmaxRes=parms["FDmaxRes"],  FDmaxResBL=parms["FDmaxResBL"], \
                                FDbaseSel=parms["FDbaseSel"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
           raise  RuntimeError("Error in AutoFlag")

    # Redo the calibration using new flagging?
    if parms["doBPCal2"]==None:
        parms["doBPCal2"] = parms["doBPCal"]
    if parms["doDelayCal2"]==None:
        parms["doDelayCal2"] = parms["doDelayCal2"]
    if parms["doAmpPhaseCal2"]==None:
        parms["doAmpPhaseCal2"] = parms["doAmpPhaseCal"]
    if parms["doAutoFlag2"]==None:
        parms["doAutoFlagCal2"] = parms["doAutoFlag"]
    if parms["doRecal"]:
        mess =  "Redo calibration:"
        printMess(mess, logFile)
        EVLAClearCal(uv, err, doGain=True, doFlag=False, doBP=True, check=check, logfile=logFile)
        OErr.printErrMsg(err, "Error resetting calibration")
        # Parallactic angle correction?
        if parms["doPACor"]:
            retCode = EVLAPACor(uv, err, \
                                logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error in Parallactic angle correction")

        # Delay recalibration
        if parms["doDelayCal2"] and parms["DCals"] and not check:
            plotFile = fileRoot+"_DelayCal2.ps"
            retCode = EVLADelayCal(uv, parms["DCals"], err, \
                                   BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                                   doCalib=2, flagVer=0, doBand=-1, \
                                   solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"],  \
                                   refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], \
                                   doZeroPhs=parms["delayZeroPhs"], \
                                   doPlot=parms["doSNPlot"], plotFile=plotFile, \
                                   nThreads=nThreads, noScrat=noScrat, \
                                   logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error in delay calibration")

            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = fileRoot+"_DelaySpec2.ps"
                retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                                       Stokes=["RR","LL"], doband=-1,          \
                                       check=check, debug=debug, logfile=logFile )
                if retCode!=0:
                    raise  RuntimeError("Error in Plotting spectrum")

        # Bandpass calibration
        if parms["doBPCal2"] and parms["BPCals"]:
            retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                            solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                            BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                            BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                            doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                            UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \
                            nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error in Bandpass calibration")
        
            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = fileRoot+"_BPSpec2.ps"
                retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \
                                   Stokes=["RR","LL"], doband=1,          \
                                   check=check, debug=debug, logfile=logFile )
                if retCode!=0:
                    raise RuntimeError("Error in Plotting spectrum")

        # Amp & phase Recalibrate
        if parms["doAmpPhaseCal2"]:
            plotFile = fileRoot+"_APCal2.ps"
            retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], \
                                 doCalib=2, doBand=2, BPVer=1, flagVer=0, \
                                 BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                                 solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                                 doAmpEdit=True, ampSigma=parms["ampSigma"], \
                                 ampEditFG=parms["ampEditFG"], \
                                 doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \
                                 noScrat=noScrat, nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise RuntimeError("Error calibrating")

        # More editing
        if parms["doAutoFlag2"]:
            mess =  "Post recalibration editing:"
            printMess(mess, logFile)
            retCode = EVLAAutoFlag (uv, [], err, flagVer=0, flagTab=2, \
                                    doCalib=2, gainUse=0, doBand=1, BPVer=1,  \
                                    IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                    doFD=parms["doAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                    FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                    FDmaxRes=parms["FDmaxRes"],  FDmaxResBL= parms["FDmaxResBL"], \
                                    FDbaseSel=parms["FDbaseSel"], \
                                    nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode!=0:
                raise  RuntimeError("Error in AutoFlag")
    # end recal
    # Calibrate and average data
    if parms["doCalAvg"]:
        retCode = KATCalAvg (uv, avgClass, parms["seq"], parms["CalAvgTime"], err, \
                              flagVer=2, doCalib=2, gainUse=0, doBand=1, BPVer=1, doPol=False, \
                              avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], \
                              BChan=1, EChan=parms["selChan"] - 1, doAuto=parms["doAuto"], \
                              BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
                              nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
           raise  RuntimeError("Error in CalAvg")

    # Get calibrated/averaged data
    if not check:
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), avgClass[0:6], \
                        disk, parms["seq"], True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating cal/avg AIPS data")

    KATUVFITS(uv, 'preimage.uvfits', 0, err, exclude=["AIPS HI", "AIPS SL", "AIPS PL"], include=["AIPS AN", "AIPS FQ"], compress=parms["Compress"], logfile=logFile)
    KATUVFITab(uv, 'preimage.uvtab', 0, err)
    # XClip
    if parms["XClip"] and parms["XClip"]>0.0:
        mess =  "Cross Pol clipping:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, [], err, flagVer=-1, flagTab=1, \
                                doCalib=2, gainUse=0, doBand=-1, maxBad=1.0,  \
                                XClip=parms["XClip"], timeAvg=1./60., \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise  RuntimeError("Error in AutoFlag")
    
    # R-L  delay calibration cal if needed,
    if parms["doRLDelay"] and parms["RLDCal"][0][0]!=None:
        if parms["rlrefAnt"]<=0:
            parms["rlrefAnt"] =  parms["refAnt"]
        # parms["rlDoBand"] if before average, BPVer=parms["rlBPVer"], 
        retCode = EVLARLDelay(uv, err,\
                              RLDCal=parms["RLDCal"], BChan=parms["rlBChan"], \
                              EChan=parms["rlEChan"], UVRange=parms["rlUVRange"], \
                              soucode=parms["rlCalCode"], doCalib=parms["rlDoCal"], gainUse=parms["rlgainUse"], \
                              timerange=parms["rltimerange"], \
                              # NOT HERE doBand=parms["rlDoBand"], BPVer=parms["rlBPVer"],  \
                              flagVer=parms["rlflagVer"], \
                              refAnt=parms["rlrefAnt"], doPol=False,  \
                              nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
                              check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in R-L delay calibration")
    
    # Polarization calibration
    if parms["doPolCal"]:
        if parms["PCRefAnt"]<=0:
            parms["PCRefAnt"] =  parms["refAnt"]
        retCode = EVLAPolCal(uv, parms["PCInsCals"], err, \
                             doCalib=2, gainUse=0, doBand=-1, flagVer=0, \
                             fixPoln=parms["PCFixPoln"], pmodel=parms["PCpmodel"], avgIF=parms["PCAvgIF"], \
                             solInt=parms["PCSolInt"], refAnt=parms["PCRefAnt"], solType=parms["PCSolType"], \
                             ChInc=parms["PCChInc"], ChWid=parms["PCChWid"], \
                             nThreads=nThreads, check=check, debug=debug, noScrat=noScrat, logfile=logFile)
        if retCode!=0 and (not check):
           raise  RuntimeError("Error in polarization calibration: "+str(retCode))
        # end poln cal.
    
    
    # R-L phase calibration cal., creates new BP table
    if parms["doRLCal"] and parms["RLDCal"][0][0]!=None:
        plotFile = fileRoot+"_RLSpec2.ps"
        if parms["rlrefAnt"]<=0:
            parms["rlrefAnt"] =  parms["refAnt"]
        retCode = EVLARLCal(uv, err,\
                            RLDCal=parms["RLDCal"], BChan=parms["rlBChan"],
                            EChan=parms["rlEChan"], UVRange=parms["rlUVRange"], \
                            ChWid2=parms["rlChWid"], solInt1=parms["rlsolint1"], solInt2=parms["rlsolint2"], \
                            RLPCal=parms["RLPCal"], RLPhase=parms["RLPhase"], \
                            RM=parms["RLRM"], CleanRad=parms["rlCleanRad"], \
                            calcode=parms["rlCalCode"], doCalib=parms["rlDoCal"], gainUse=parms["rlgainUse"], \
                            timerange=parms["rltimerange"], FOV=parms["rlFOV"], \
                            doBand=-1, BPVer=1, flagVer=parms["rlflagVer"], \
                            refAnt=parms["rlrefAnt"], doPol=parms["doPol"], PDVer=parms["PDVer"],  \
                            doPlot=parms["doSpecPlot"], plotFile=plotFile, \
                            nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
                            check=check, debug=debug)
        if retCode!=0:
            raise RuntimeError("Error in RL phase spectrum calibration")
    
    # VClip
    if parms["VClip"] and parms["VClip"]>0.0:
        mess =  "VPol clipping:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, [], err, flagVer=-1, flagTab=1, \
                                doCalib=2, gainUse=0, doBand=-1,  \
                                VClip=parms["VClip"], timeAvg=parms["timeAvg"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode!=0:
            raise  RuntimeError("Error in AutoFlag VClip")
    
    # Plot corrected data?
    parms["doSpecPlot"]=True
    if parms["doSpecPlot"] and parms["plotSource"]:
        plotFile = fileRoot+"_Spec.ps"
        retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \
                               plotFile, parms["refAnt"], err, \
                               Stokes=["I"], doband=-1,          \
                               check=check, debug=debug, logfile=logFile )
        if retCode!=0:
            raise  RuntimeError("Error in Plotting spectrum")
    
    
    # Image targets
    if parms["doImage"]:
        # If targets not specified, image all
        if len(parms["targets"])<=0:
            slist = EVLAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
        else:
            slist = targets
        slist=targets
        KATImageTargets (uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, OutlierArea=parms["outlierArea"],\
                          doCalib=-1, doBand=-1,  flagVer=-1, doPol=parms["doPol"], PDVer=parms["PDVer"],  \
                          Stokes=parms["Stokes"], FOV=parms["FOV"], Robust=parms["Robust"], Niter=parms["Niter"], \
                          CleanRad=parms["CleanRad"], minFlux=parms["minFlux"], OutlierSize=parms["OutlierSize"], \
                          xCells=parms["xCells"], yCells=parms["yCells"], Reuse=parms["Reuse"], minPatch=parms["minPatch"], \
                          maxPSCLoop=parms["maxPSCLoop"], minFluxPSC=parms["minFluxPSC"], noNeg=parms["noNeg"], \
                          solPInt=parms["solPInt"], solPMode=parms["solPMode"], solPType=parms["solPType"], \
                          maxASCLoop=parms["maxASCLoop"], minFluxASC=parms["minFluxASC"], nx=parms["nx"], ny=parms["ny"], \
                          solAInt=parms["solAInt"], solAMode=parms["solAMode"], solAType=parms["solAType"], \
                          avgPol=parms["avgPol"], avgIF=parms["avgIF"], minSNR = parms["minSNR"], refAnt=parms["refAnt"], \
                          do3D=parms["do3D"], BLFact=parms["BLFact"], BLchAvg=parms["BLchAvg"], \
                          doMB=parms["doMB"], norder=parms["MBnorder"], maxFBW=parms["MBmaxFBW"], \
                          PBCor=parms["PBCor"],antSize=parms["antSize"], autoCen=parms["autoCen"], \
                          nTaper=parms["nTaper"], Tapers=parms["Tapers"], sefd=sefd, \
                          nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=False)
        # End image
    
    # Get report on sources
    if parms["doReport"]:
        # If targets not specified, do all
        if len(parms["targets"])<=0:
            slist = EVLAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
        else:
            slist = parms["targets"]
        Report = EVLAReportTargets(uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, \
                                       Stokes=parms["Stokes"], logfile=logFile, check=check, debug=debug)
        # Save to pickle jar
        ReportPicklefile = fileRoot+"_Report.pickle"   # Where results saved
        SaveObject(Report, ReportPicklefile, True) 
       
    # Write results, cleanup    
    # Save cal/average UV data? 
    if parms["doSaveUV"] and (not check):
        Aname = EVLAAIPSName(project)
        cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass, disk, parms["seq"], True, err)
            filename = fileRoot+"_Cal.uvtab"
            KATUVFITS (uv, filename, 0, err, exclude=["AIPS HI", "AIPS SL", "AIPS PL"], include=["AIPS AN", "AIPS FQ"], compress=parms["Compress"], logfile=logFile)
            EVLAAddOutFile(os.path.basename(filename), 'project', "Calibrated Averaged UV data" )
            # Save list of output files
            EVLASaveOutFiles(manifestfile)
            del uvt
    # Imaging results
    # If targets not specified, save all
    if len(parms["targets"])<=0:
        slist = EVLAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
    else:
        slist = parms["targets"]
    for target in slist:
        if parms["doSaveImg"] and (not check):
            for s in parms["Stokes"]:
                oclass = s+outIClass[1:]
                outname = target
                # Test if image exists
                cno = AIPSDir.PTestCNO(disk, user, outname, oclass, "MA", parms["seq"], err)
                #print cno
                if cno <= 0 :
                    continue
                x = Image.newPAImage("out", outname, oclass, disk, parms["seq"], True, err)
                outfilefits = fileRoot+'_'+target+"."+oclass+".fits"
                xf = KATImFITS(x, outfilefits, 0, err, logfile=logFile)
                x = Image.newPAImage("out", outname, oclass, disk, parms["seq"], True, err)
                outfile = fileRoot+'_'+target+"."+oclass+".fittab.fits"
                xf = EVLAImFITS (x, outfile, 0, err, logfile=logFile)
                EVLAAddOutFile(outfile, target, 'Image of '+ target)
                # Statistics
                zz=imstat(x, err, logfile=logFile)
                # Make a Jpeg image
                FITS2jpeg.fits2jpeg(outfilefits,chans=1,contrast=0.05,cmap='jet',area=0.7)
                EVLAAddOutFile(outfile.replace('.fits','.jpeg'), target, 'Jpeg image of '+ target)
    # end writing loop
    
    # Save list of output files
    EVLASaveOutFiles(manifestfile)
    OErr.printErrMsg(err, "Writing output")
    
    # Contour plots
    if parms["doKntrPlots"]:
        mess = "INFO --> Contour plots (doKntrPlots)"
        printMess(mess, logFile)
        EVLAKntrPlots( err, imName=parms["targets"], project=fileRoot,
                       disk=disk, debug=debug )
        # Save list of output files
        EVLASaveOutFiles(manifestfile)
    elif debug:
        mess = "Not creating contour plots ( doKntrPlots = "+str(parms["doKntrPlots"])+ " )"
        printMess(mess, logFile)

    # Source uv plane diagnostic plots
    if parms["doDiagPlots"]:
        mess = "INFO --> Diagnostic plots (doDiagPlots)"
        printMess(mess, logFile)
        # Get the highest number avgClass catalog file
        Aname = EVLAAIPSName( project )
        uvc = None
        if not check:
            uvname = project+"_Cal"
            uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True, err)
        EVLADiagPlots( uvc, err, cleanUp=parms["doCleanup"], \
                           project=fileRoot, \
                           logfile=logFile, check=check, debug=debug )
        # Save list of output files
        EVLASaveOutFiles(manifestfile)
    elif debug:
        mess = "Not creating diagnostic plots ( doDiagPlots = "+str(parms["doDiagPlots"])+ " )"
        printMess(mess, logFile)
    
    # Save metadata
    srcMetadata = None
    projMetadata = None
    if parms["doMetadata"]:
        mess = "INFO --> Save metadata (doMetadata)"
        printMess(mess, logFile)
        uvc = None
        if not uvc:
            # Get calibrated/averaged data
            Aname = EVLAAIPSName(project)
            uvname = project+"_Cal"
            uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
    
        # Get source metadata; save to pickle file
        srcMetadata = EVLASrcMetadata( uvc, err, Sources=parms["targets"], seq=parms["seq"], \
                                       sclass=outIClass, Stokes=parms["Stokes"],\
                                       logfile=logFile, check=check, debug=debug )
        picklefile = fileRoot+".SrcReport.pickle" 
        SaveObject( srcMetadata, picklefile, True ) 
        EVLAAddOutFile(os.path.basename(picklefile), 'project', 'All source metadata' )
    
        # Get project metadata; save to pickle file
        projMetadata = KATProjMetadata( uvc, AIPS_VERSION, err, \
            PCals=parms["PCals"], ACals=parms["ACals"], \
            BPCals=parms["BPCals"], DCals=parms["DCals"], \
            project = project, band = band, \
            dataInUVF = parms["archRoot"], archFileID = fileRoot )
        picklefile = fileRoot+".ProjReport.pickle"
        SaveObject(projMetadata, picklefile, True) 
        EVLAAddOutFile(os.path.basename(picklefile), 'project', 'Project metadata' )
    else:
        # Fetch from pickle jar
        picklefile = fileRoot+".SrcReport.pickle"
        srcMetadata = FetchObject(picklefile)
        picklefile = fileRoot+".ProjReport.pickle"
        projMetadata = FetchObject(picklefile)
   
    # Write report
    if parms["doHTML"]:
        mess = "INFO --> Write HTML report (doHTML)"
        printMess(mess, logFile)
        KATHTMLReport( projMetadata, srcMetadata, \
                            outfile=fileRoot+"_report.html", \
                            logFile=logFile )
    
    # Write VOTable
    if parms["doVOTable"]:
        mess = "INFO --> Write VOTable (doVOTable)"
        printMess(mess, logFile)
        EVLAAddOutFile( 'VOTable.xml', 'project', 'VOTable report' ) 
        EVLAWriteVOTable( projMetadata, srcMetadata, filename=fileRoot+'_VOTable.xml' )
    
    # Save list of output files
    EVLASaveOutFiles(manifestfile)
    
    # Cleanup - delete AIPS files
    if parms["doCleanup"] and (not check):
        mess = "INFO --> Clean up (doCleanup)"
        printMess(mess, logFile)
        # Delete target images
        # How many Stokes images
        nstok = len(parms["Stokes"])
        for istok in range(0,nstok):
            oclass = parms["Stokes"][istok:istok+1]+outIClass[1:]
            AllDest(err, disk=disk,Aseq=parms["seq"],Aclass=oclass)
        
        # Delete initial UV data
        Aname = EVLAAIPSName(project)
        # Test if data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, dataClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS RAW UV DATA", Aname, dataClass[0:6], disk, parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting raw AIPS data")
        # Zap calibrated/averaged data
        # Test if data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass[0:6], disk, parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
        # Zap UnHanned data if present
        loadClass = "Raw"
        # Test if data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, loadClass[0:6], "UV", parms["seq"], err)
        if cno>0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, loadClass[0:6], disk, parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
        OErr.printErrMsg(err, "Writing output/cleanup")


    # Delete AIPS scratch DA00 and disk
    if os.path.exists(os.environ['DA00']): shutil.rmtree(os.environ['DA00'])
    for disk in ObitTalkUtil.AIPSDir.AIPSdisks:
        if os.path.exists(disk): shutil.rmtree(disk)


    # Shutdown
    mess = "Finished project "+parms["project"]+ \
    " AIPS user no. "+str(AIPS.userno)
    printMess(mess, logFile)
    OErr.printErr(err)
    OSystem.Shutdown(ObitSys)
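# The cleanup section above repeats one pattern: test whether an AIPS
# catalogue entry exists with AIPSDir.PTestCNO, attach it, then Zap it.
# A hedged helper sketch of that pattern (the function name is illustrative,
# not part of the original pipeline; it only uses calls shown above):
def zapAIPSUV(Aname, Aclass, disk, user, seq, err):
    """Delete an AIPS UV catalogue entry if present; ignore missing files."""
    cno = AIPSDir.PTestCNO(disk, user, Aname, Aclass[0:6], "UV", seq, err)
    if cno > 0:
        uvt = UV.newPAUV("ZAP", Aname, Aclass[0:6], disk, seq, True, err)
        uvt.Zap(err)
        del uvt
        if err.isErr:
            OErr.printErrMsg(err, "Error deleting "+Aname+"."+Aclass)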
Exemplo n.º 23
0
def KAT2AIPS (katdata, outUV, disk, fitsdisk, err, \
              calInt=1.0, static=None, **kwargs):
    """Convert MeerKAT MVF data set to an Obit UV.

    This module requires katdal and katpoint and their dependencies
    contact Ludwig Schwardt <*****@*****.**> for details.

    Parameters
    ----------
    katdata : katdal dataset
        input katdal object
    outUV : ??
        Obit UV object, should be a KAT template for the
        appropriate number of IFs and poln.
    disk  : int
        AIPS Disk number
    fitsdisk: int
        FITS Disk number
    err : ??
        Obit error/message stack
    calInt : float
        Calibration interval in min.
    targets : list, optional
        List of target names to extract from the file
    stop_w : bool
        Fringe stop data? (Values only for KAT-7)
    """
    ################################################################
    OErr.PLog(err, OErr.Info, "Converting MVF data to AIPS UV format.")
    OErr.printErr(err)
    print("Converting MVF data to AIPS UV format.\n")

    # Extract metadata
    meta = GetKATMeta(katdata, err)

    # TODO: Fix this all up so that the below isn't the case!
    if meta["products"].size != meta["nants"] * meta["nants"] * 4:
        raise ValueError(
            "Only full stokes and all correlation products are supported.")

    # Extract AIPS parameters of the uv data to the metadata
    meta["Aproject"] = outUV.Aname
    meta["Aclass"] = outUV.Aclass
    meta["Aseq"] = outUV.Aseq
    meta["Adisk"] = disk
    meta["calInt"] = calInt
    meta["fitsdisk"] = fitsdisk
    # Update descriptor
    UpdateDescriptor(outUV, meta, err)
    # Write AN table
    WriteANTable(outUV, meta, err)
    # Write FQ table
    WriteFQTable(outUV, meta, err)
    # Write SU table
    WriteSUTable(outUV, meta, err)

    # Convert data
    ConvertKATData(outUV,
                   katdata,
                   meta,
                   err,
                   static=static,
                   blmask=kwargs.get('blmask', 1.e10),
                   stop_w=kwargs.get('stop_w', False),
                   timeav=kwargs.get('timeav', 1),
                   flag=kwargs.get('flag', False),
                   doweight=kwargs.get('doweight', True),
                   doflags=kwargs.get('doflags', True))

    # Index data
    OErr.PLog(err, OErr.Info, "Indexing data")
    OErr.printErr(err)
    UV.PUtilIndex(outUV, err)

    # Open/close UV to update header
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # initial CL table
    OErr.PLog(err, OErr.Info, "Create Initial CL table")
    OErr.printErr(err)
    print("Create Initial CL table\n")
    UV.PTableCLfromNX(outUV, meta["maxant"], err, calInt=calInt)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")

    # History
    outHistory = History.History("outhistory", outUV.List, err)
    outHistory.Open(History.READWRITE, err)
    outHistory.TimeStamp("Convert MeerKAT MVF data to Obit", err)
    for name in katdata.name.split(','):
        for line in _history_wrapper.wrap("datafile = " + name):
            outHistory.WriteRec(-1, line, err)
    outHistory.WriteRec(-1, "calInt   = " + str(calInt), err)
    outHistory.Close(err)
    outUV.Open(UV.READONLY, err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, message="Update UV header failed")
    # Return the metadata for the pipeline
    return meta
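# A minimal, hedged usage sketch for KAT2AIPS (the MVF file name and target
# are placeholders; outUV is assumed to have been created beforehand as a
# KAT template UV with the matching number of IFs and polarisations):
#   import katdal
#   katdata = katdal.open("1234567890_sdp_l0.full.rdb")
#   meta = KAT2AIPS(katdata, outUV, disk, fitsdisk, err, calInt=1.0,
#                   targets=["PKS1934-638"], stop_w=False)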
Exemplo n.º 24
0
def SelectCC(im, inCC, outCC, radius, peelPos, err):
    """
    Select/copy CCs more than radius from peelPos
    
    This generates a CC table which can be subtracted from the uv data
    to remove all sources except those in the peel source area.
    * im     = Python Image with CC Tables
    * inCC   = input CC version
    * outCC  = output CC version
    * radius = radius (deg) of zone of exclusion
    * peelPos= [RA, Dec] in deg.
    * err    = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not Image.PIsA(im):
        raise TypeError("im MUST be a Python Obit Image")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    # Geometry
    xref = im.Desc.Dict['crval'][0]
    yref = im.Desc.Dict['crval'][1]
    xrefpix = im.Desc.Dict['crpix'][0]
    yrefpix = im.Desc.Dict['crpix'][1]
    xinc = abs(im.Desc.Dict['cdelt'][0])
    yinc = im.Desc.Dict['cdelt'][1]
    rot = im.Desc.Dict['crota'][1]
    imtype = im.Desc.Dict['ctype'][0][4:]
    # Input CC
    inTab = im.NewTable(Table.READONLY, "AIPS CC", inCC, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error finding input CC Table")
        return
    # Output CC
    nrow = inTab.Desc.Dict['nrow']
    noParms = inTab.Desc.List.Dict['NO_PARMS'][2][0]
    outTab = im.NewTable(Table.WRITEONLY, "AIPS CC", outCC, err, \
                             noParms = noParms)
    if err.isErr:
        OErr.printErrMsg(err, "Error creating output CC Table")
        return
    # Open
    inTab.Open(Table.READONLY, err)
    outTab.Open(Table.WRITEONLY, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening CC Tables")
        return
    orow = 1
    count = 0
    sumf = 0.0
    OErr.PLog(err, OErr.Info, "Excluding:")
    for irow in range(1, nrow + 1):
        row = inTab.ReadRow(irow, err)
        # Want this one?
        dx = row['DELTAX'][0]
        dy = row['DELTAY'][0]
        [ierr, xpos, ypos] = SkyGeom.PWorldPosLM(dx, dy, xref, yref, xinc,
                                                 yinc, rot, imtype)
        # Small angle approximation
        dra = (xpos - peelPos[0]) * cos(radians(ypos))  # RA offset scaled by cos(Dec)
        delta = ((dra)**2 + (ypos - peelPos[1])**2)**0.5
        if delta > radius:
            outTab.WriteRow(orow, row, err)
            orow += 1
        else:
            #print irow,xpos,ypos
            count += 1
            sumf += row['FLUX'][0]
            ras = ImageDesc.PRA2HMS(xpos)
            decs = ImageDesc.PDec2DMS(ypos)
            OErr.PLog(err, OErr.Info,
                      "%6d %s %s flux= %f" % (irow, ras, decs, row['FLUX'][0]))
    # End loop
    OErr.PLog(err, OErr.Info, "Drop %6d CCs, sum flux= %f" % (count, sumf))
    OErr.printErr(err)
    inTab.Close(err)
    outTab.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error copying CC Table")
        return
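# Hedged usage sketch: copy CC version 1 to version 2 of 'im', dropping
# components within 0.1 deg of a peel position (position and radius are
# illustrative values only):
#   peelPos = [187.70593, 12.39112]   # [RA, Dec] of the peel source, deg
#   SelectCC(im, 1, 2, 0.1, peelPos, err)
#   # CC version 2 can then be subtracted from the uv data, leaving only
#   # the peel source area.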
Exemplo n.º 25
0
def RestorePeel(peelMod, CCVer, image, err):
    """
    Restore CCs from one image onto another
    
    If images are ImageMF then multiple planes restored.
    * peelMod   Image with CC table (as Image)
    * CCVer     CC version on peelMod to restore
    * image     Output Image to which the components are added
    * err       Python Obit Error/message stack
    """
    ################################################################
    import Obit, Image, ImageDesc, SkyGeom, Table, History, OErr
    import UV, UVDesc, OSystem, UVSelfCal, FArray
    # Checks
    if not Image.PIsA(peelMod):
        raise TypeError("uv MUST be a Python Obit Image")
    if not Image.PIsA(image):
        raise TypeError("imp MUST be a Python Obit Image")

    ph = peelMod.Desc.Dict
    ih = image.Desc.Dict  # Descriptors
    ph_x = ph['crval'][0]
    ph_y = ph['crval'][1]
    ph_rot = ph['crota'][1]
    ph_dx = ph['cdelt'][0]
    ph_dy = ph['cdelt'][1]
    ph_type = ph['ctype'][0][4:]
    ph_ref_x = ph['crpix'][0]
    ph_ref_y = ph['crpix'][1]
    ih_x = ih['crval'][0]
    ih_y = ih['crval'][1]
    ih_rot = ih['crota'][1]
    ih_dx = ih['cdelt'][0]
    ih_dy = ih['cdelt'][1]
    ih_type = ih['ctype'][0][4:]
    ih_ref_x = ih['crpix'][0]
    ih_ref_y = ih['crpix'][1]
    bmaj = ih['beamMaj']
    bmin = ih['beamMin']
    bpa = ih['beamPA']
    bmaj /= abs(ih_dx)
    bmin /= abs(ih_dx)  # Beam in pixels (square grid)

    #Beam to insert
    beam = FArray.FArray('beam', naxis=[21, 21])
    bx = 10.
    by = 10.
    FArray.PEGauss2D(beam, 1.0, [bx, by], [bmaj, bmin, bpa])

    cctab = peelMod.NewTable(Table.READONLY, 'AIPS CC', CCVer, err)  # CC Table
    cctab.Open(Table.READONLY, err)
    OErr.printErr(err)

    # First plane
    image.GetPlane(None, [1, 1, 1, 1, 1], err)
    imArr = image.FArray
    ncc = cctab.Desc.Dict['nrow']  # Number of CCs
    for irow in range(1, ncc + 1):
        row = cctab.ReadRow(irow, err)
        flux = row['FLUX'][0]
        dx = row['DELTAX'][0]
        dy = row['DELTAY'][0]
        dz = row['DELTAZ'][0]
        [ierr, ra, dec] = SkyGeom.PWorldPosLM(dx, dy, ph_x, ph_y, ph_dx, ph_dy,
                                              ph_rot, ph_type)
        # output pixel
        [ierr, xpix,
         ypix] = SkyGeom.PXYpix(ra, dec, ih_x, ih_y, ih_ref_x, ih_ref_y, ih_dx,
                                ih_dy, ih_rot, ih_type)
        # Need beam in correct location
        tbx = xpix - int(xpix + 0.5)
        tby = ypix - int(ypix + 0.5)
        FArray.PFill(beam, 0.0)
        FArray.PEGauss2D(beam, 1.0, [bx + tbx, by + tby], [bmaj, bmin, bpa])
        # Add
        FArray.PShiftAdd(
            imArr,
            [int(xpix + 0.5) - 1, int(ypix + 0.5) - 1], beam,
            [int(bx + 0.5), int(by + 0.5)], flux, imArr)
    # end loop
    image.PutPlane(None, [1, 1, 1, 1, 1], err)  # rewrite

    # MFImage Planes
    if ih['ctype'][2] == 'SPECLNMF ':
        nterm = image.Desc.List.Dict['NTERM'][2][0]
        nspec = image.Desc.List.Dict['NSPEC'][2][0]
        for ip in range(nterm + 1, nterm + nspec + 1):
            image.GetPlane(None, [ip, 1, 1, 1, 1], err)
            imArr = image.FArray
            for irow in range(1, ncc + 1):
                row = cctab.ReadRow(irow, err)
                flux = row['PARMS'][3 + ip - nterm]
                dx = row['DELTAX'][0]
                dy = row['DELTAY'][0]
                dz = row['DELTAZ'][0]
                [ierr, ra,
                 dec] = SkyGeom.PWorldPosLM(dx, dy, ph_x, ph_y, ph_dx, ph_dy,
                                            ph_rot, ph_type)
                # output pixel
                [ierr, xpix, ypix] = SkyGeom.PXYpix(ra, dec, ih_x, ih_y,
                                                    ih_ref_x, ih_ref_y, ih_dx,
                                                    ih_dy, ih_rot, ih_type)
                # Need beam in correct location
                tbx = xpix - int(xpix + 0.5)
                tby = ypix - int(ypix + 0.5)
                FArray.PFill(beam, 0.0)
                FArray.PEGauss2D(beam, 1.0, [bx + tbx, by + tby],
                                 [bmaj, bmin, bpa])
                # Add
                FArray.PShiftAdd(imArr,
                                 [int(xpix + 0.5) - 1,
                                  int(ypix + 0.5) - 1], beam,
                                 [int(bx + 0.5), int(by + 0.5)], flux, imArr)
            # end loop
            image.PutPlane(None, [ip, 1, 1, 1, 1], err)  # rewrite
    # end plane loop
    # end MFImage Planes
    # History
    x = image
    y = peelMod
    hi = x.History(3, err)
    hi.Open(3, err)
    hi.TimeStamp('Restore Peel', err)
    if x.FileType == 'FITS':
        hiCard = y.FileName.strip() + '.' + str(y.Disk)
    else:
        hiCard = y.Aname.strip() + '.' + y.Aclass.strip() + '.' + str(
            y.Aseq) + '.' + str(y.Disk)

    hi.WriteRec(-1, "RestorePeel / file=" + hiCard, err)
    hi.Close(err)
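# Hedged usage sketch: restore CC table version 1 of the peel model image
# onto the full-field image (both assumed to be Obit Image/ImageMF objects):
#   RestorePeel(peelImage, 1, fullImage, err)
# For ImageMF images (SPECLNMF axis) the coarse spectral planes are also
# restored, as handled above.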
Exemplo n.º 26
0
def SubPeel(uv, source, imp, uvp, err, addBack=False, seq=999, \
                flagVer=0, nThreads=1, doGPU=False, noScrat=[0,0,0], taskLog='', debug=False):
    """
    Subtract Peel model w/ solutions, then optionally add back w/o corruptions
    
    UV data should have calibration tables from self calibration
    Output data will be on the same disk as the input, seq=seq, class='UPeel' and
    with name = source (up to 12 char).
    Returns Peel source subtracted/replaced data
    * uv        Dataset with cal tables
                Needs at least the self cal gain table
    * source    source name
    * imp       Peel source model (CC table from ImagePeel)
    * uvp       UV data the result of peel (ImagePeel)
    * err       Python Obit Error/message stack
    * seq       Sequence number for output
    * addBack   Add model back to data w/o corruptions? Not recommended.
    * flagVer   FG table to apply, -1=> no flag
    * nThreads  number of threads to use
    * doGPU     Use GPU if available?
    * noScrat   AIPS disks not to use for scratch
    * taskLog   Log file
    * debug     If True leave debug Input file in /tmp
    """
    ################################################################
    # Checks
    if not UV.PIsA(uv):
        raise TypeError("uv MUST be a Python Obit UV data")
    if not Image.PIsA(imp):
        raise TypeError("imp MUST be a Python Obit Image")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    # Split main data set
    OErr.PLog(err, OErr.Info, "Copy data")
    OErr.printErr(err)
    split = ObitTask('Split')
    setname(uv, split)
    split.outDisk = split.inDisk
    split.outSeq = seq
    split.outClass = 'UPeel'
    split.Sources[0] = source
    split.flagVer = flagVer
    if uv.GetHighVer('AIPS SN') > 0:
        split.doCalib = 2
        split.gainUse = 0
        split.doCalWt = True
    else:
        split.doCalib = -1
    split.taskLog = taskLog
    split.debug = debug
    split.g
    outClass = split.outClass
    outDisk = split.outDisk
    outSeq = split.outSeq

    # Get data
    OErr.PLog(err, OErr.Info, "Make Peel model with corruptions")
    OErr.printErr(err)
    if UV.AExist(source[0:12], outClass, outDisk, outSeq, err):
        datauv = UV.newPAUV('data', source[0:12], outClass, outDisk, outSeq,
                            True, err)
    else:
        datauv = UV.newPAUV('data', source[0:8], outClass, outDisk, outSeq,
                            True, err)
    # Make data set with the model peel source with peel cal applied
    uvsub = ObitTask('UVSub')
    setname(uv, uvsub)
    uvsub.outName = source[0:12]
    uvsub.outDisk = uvsub.inDisk
    uvsub.outSeq = 1
    uvsub.outClass = 'Model'
    uvsub.Sources[0] = source
    uvsub.flagVer = flagVer
    uvsub.doCalib = -1
    uvsub.gainUse = 0
    set2name(imp, uvsub)
    uvsub.CCVer = 1
    uvsub.nfield = 1
    uvsub.Cmethod = 'DFT'
    uvsub.Opcode = 'MODL'
    uvsub.PBCor = False
    uvsub.noScrat = noScrat
    uvsub.noNeg = False
    uvsub.taskLog = taskLog
    uvsub.nThreads = nThreads
    uvsub.doGPU = doGPU
    uvsub.debug = debug
    uvsub.g

    # Get model data
    modeluv = UV.newPAUV('model', uvsub.outName, uvsub.outClass, uvsub.outDisk,
                         uvsub.outSeq, True, err)
    # Copy/invert/unblank SN table from peeluv
    hiPeelSN = uvp.GetHighVer('AIPS SN')
    inTab = max(1, hiPeelSN)
    sntab = uvp.NewTable(Table.READONLY, 'AIPS SN', inTab, err)
    z = UVSelfCal.PInvertSN(sntab, modeluv, 1, True, err)
    # Apply calibration table in subtract
    modeluv.List.set('doCalSelect', True)
    modeluv.List.set('doCalib', 2)
    modeluv.List.set('gainUse', 1)
    modeluv.List.set('passAll', True)

    # Subtract model from main data
    OErr.PLog(err, OErr.Info, "Subtract Corrupted Peel model from uv data")
    UV.PUtilVisSub(datauv, modeluv, datauv, err)
    OErr.printErr(err)

    # Add model without corrupting calibration
    if addBack:
        OErr.PLog(err, OErr.Info, "Add Peel model without corruptions")
        uvsub = ObitTask('UVSub')
        setname(datauv, uvsub)
        setoname(datauv, uvsub)
        uvsub.outSeq = uvsub.inSeq + 1
        uvsub.Sources[0] = source
        uvsub.flagVer = flagVer
        uvsub.doCalib = -1
        uvsub.gainUse = 0
        set2name(imp, uvsub)
        uvsub.CCVer = 1
        uvsub.nfield = 1
        uvsub.Cmethod = 'DFT'
        uvsub.Factor = -1.
        uvsub.PBCor = False
        uvsub.noScrat = noScrat
        uvsub.noNeg = False
        uvsub.taskLog = taskLog
        uvsub.nThreads = nThreads
        uvsub.doGPU = doGPU
        uvsub.debug = debug
        uvsub.g
        outClass = uvsub.outClass
        outDisk = uvsub.outDisk
        outSeq = uvsub.outSeq
        # end add back
    OErr.printErr(err)
    # Delete model dataset
    if not debug:
        modeluv.Zap(err)

    # Final data
    if UV.AExist(source[0:12], outClass, outDisk, outSeq, err):
        datauv2 = UV.newPAUV('data', source[0:12], outClass, outDisk, outSeq,
                             True, err)
    else:
        datauv2 = UV.newPAUV('data', source[0:8], outClass, outDisk, outSeq,
                             True, err)
    return datauv2
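# Hedged usage sketch (source name and task parameters are illustrative;
# 'imp' and 'uvp' would normally be the image and uv outputs of ImagePeel):
#   peeled = SubPeel(uv, "3C286", imp, uvp, err, addBack=False, seq=999,
#                    flagVer=2, nThreads=8, taskLog="peel.log")
#   # 'peeled' is the dataset with the corrupted peel model subtracted.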
Exemplo n.º 27
0
outHistory.WriteRec(-1,ObitSys.pgmName+" inFile = "+inFile,err)
outHistory.WriteRec(-1,ObitSys.pgmName+" FOV = "+str(FOV),err)
outHistory.WriteRec(-1,ObitSys.pgmName+" Stokes = "+Stokes,err)
outHistory.WriteRec(-1,ObitSys.pgmName+" TimeRange = "+str(TimeRange),err)
outHistory.WriteRec(-1,ObitSys.pgmName+" UVRange = "+str(UVRange),err)
outHistory.WriteRec(-1,ObitSys.pgmName+" Robust = "+str(Robust),err)
outHistory.WriteRec(-1,ObitSys.pgmName+" UVTaper = "+str(UVTaper),err)
outHistory.Close(err)
OErr.printErrMsg(err, "Error with history")

# output image
outImage  = Image.newPFImage("Output image", outFile,  outDisk,  False, err)
Image.PClone(tmpImage, outImage, err)   # Same structure etc.

# Copy to quantized integer image with history
print "Write output image"
inHistory  = History.History("history", tmpImage.List, err)
Image.PCopyQuantizeFITS (tmpImage, outImage, err, inHistory=inHistory)

# Compare with master file [rms diff, max abs diff, max. master]
masterImage  = Image.newPFImage("Master image",   masterFile,  masterDisk,  True, err)
diff = Image.PCompare(outImage, masterImage, err)
print "Comparison, rel. max. residual",diff[1]/diff[0], " rel RMS residual",diff[2]/diff[0]

# Say something
print "CLEAN: Dirty Image ",inFile,"Clean",outFile

# Shutdown Obit
OErr.printErr(err)
OSystem.Shutdown(ObitSys)
Exemplo n.º 28
0
File: VLACal.py Project: mauch/Obit
def VLAUVLoad(filename, inDisk, Aname, Aclass, Adisk, Aseq, err, logfile=''):
    """ Read FITS uvtab file into AIPS

    Read a UVTAB FITS UV data file and write an AIPS data set
    filename   = name of FITS file
    inDisk     = FITS directory number
    Aname      = AIPS name of file
    Aclass     = AIPS class of file
    Aseq       = AIPS sequence number of file, 0=> create new
    Adisk      = AIPS disk number
    err        = Python Obit Error/message stack
    logfile    = logfile for messages
    returns AIPS UV data object
    """
    ################################################################
    #
    # Checks
    if not OErr.OErrIsA(err):
        raise TypeError, "err MUST be an OErr"
    #
    # Get input
    inUV = UV.newPFUV("FITS UV DATA", filename, inDisk, True, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error with FITS data")
    # Get output, create new if seq=0
    if Aseq < 1:
        OErr.printErr(err)  # Print any outstanding messages
        user = OSystem.PGetAIPSuser()
        Aseq = AIPSDir.PHiSeq(Adisk, user, Aname, Aclass, "UV", err)
        # If it already exists, increment seq
        if AIPSDir.PTestCNO(Adisk, user, Aname, Aclass, "UV", Aseq, err) > 0:
            Aseq = Aseq + 1
        OErr.PClear(err)  # Clear any message/error
    mess = "Creating AIPS UV file " + Aname + "." + Aclass + "." + str(
        Aseq) + " on disk " + str(Adisk)
    printMess(mess, logfile)
    outUV = UV.newPAUV("AIPS UV DATA", Aname, Aclass, Adisk, Aseq, False, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error creating AIPS data")
    # Copy
    UV.PCopy(inUV, outUV, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error copying UV data to AIPS")
    # Copy History
    inHistory = History.History("inhistory", inUV.List, err)
    outHistory = History.History("outhistory", outUV.List, err)
    History.PCopyHeader(inHistory, outHistory, err)
    # Add history
    outHistory.Open(History.READWRITE, err)
    outHistory.TimeStamp(" Start Obit uvlod", err)
    outHistory.WriteRec(
        -1, "uvlod   / FITS file " + filename + " disk " + str(inDisk), err)
    outHistory.Close(err)
    #
    # Copy Tables
    exclude = [
        "AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL", "History"
    ]
    include = []
    UV.PCopyTables(inUV, outUV, exclude, include, err)
    return outUV  # return new object
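# Hedged usage sketch (FITS file and AIPS names are placeholders):
#   uv = VLAUVLoad("myproj.uvtab", 0, "MYPROJ", "UVDATA", 1, 0, err,
#                  logfile="load.log")
# With Aseq=0 the next free AIPS sequence number on disk 1 is used.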
Exemplo n.º 29
0
def pipeline(scriptName, aipsSetup, parmFile):
    """
    Linear (feed) Polarization Continuum pipeline.
    
    * *scriptName* = this script file name
    * *aipsSetup*  = AIPS setup file
    * *parmFile*   = pipeline input parameters file
    """
    ############################# Initialize OBIT ##########################################
    noScrat = []
    exec(open(aipsSetup).read())
    EVLAAddOutFile(aipsSetup, 'project', "Obit's AIPS setup file")

    ############################# Default parameters ##########################################

    # Initialize parameters
    parms = EVLAInitContParms()

    # Linear feed specific parameters
    # Relative gain calibration
    parms["doXYRelGain"] = False  # constrain relative X, Y gain amplitudes
    parms["doXYRelGain2"] = None  # constrain relative X, Y gain amplitudes, 2nd pass
    parms["XYRelGainCal"] = None  # X/Y gain calibrator structure
    parms["XYRelGainCalTime"] = [0., 0.]  # Time range for relative gain calibration

    ############################# Set Project Processing parameters ##################
    print("parmFile", parmFile)
    exec(open(parmFile).read())
    EVLAAddOutFile(parmFile, 'project', 'Pipeline input parameters')

    # frequency/configuration dependent default parameters
    EVLAInitContFQParms(parms)

    # Linear feed specific:
    parms["avgPol"] = parms["doXYRelGain"] and (parms["XYRelGainCal"] != None
                                                )  # Fix relative X/Y gains?

    # General data parameters
    band = parms["band"]  # Observing band
    dataClass = ("UVDa" + band)[0:6]  # AIPS class of raw uv data
    project = parms["project"][
        0:12]  # Project name (12 char or less, used as AIPS Name)
    session = parms["session"]  # Project session code

    ################################## Process #####################################
    fileRoot = parms["project"] + "_" + parms["session"] + "_" + parms[
        "band"]  # root of file name
    logFile = fileRoot + ".log"  # Processing log file
    uv = None
    uvc = None
    avgClass = ("UVAv" + band)[0:6]  # Averaged data AIPS class
    outIClass = parms["outIClass"]  # image AIPS class

    # Load the outputs pickle jar
    EVLAFetchOutFiles()

    # Logging directly to logFile
    OErr.PInit(err, parms["prtLv"], logFile)
    OSystem.PAllowThreads(nThreads)  # Allow threads in Obit/Python
    retCode = 0
    EVLAAddOutFile(logFile, 'project', 'Pipeline log file')

    mess = "Start project "+parms["project"]+" session "+parms["session"]+\
           " "+parms["band"]+" Band"+" AIPS user no. "+str(AIPS.userno)+\
           ", EVLA configuration "+parms["VLACfg"]
    printMess(mess, logFile)
    if debug:
        pydoc.ttypager = pydoc.plainpager  # don't page task input displays
        mess = "Using Debug mode "
        printMess(mess, logFile)
    if check:
        mess = "Only checking script"
        printMess(mess, logFile)

    # Log parameters
    printMess("Parameter settings", logFile)
    for p in parms:
        mess = "  " + p + ": " + str(parms[p])
        printMess(mess, logFile)

    # Save parameters to pickle jar, manifest
    ParmsPicklefile = project + "_" + session + "_" + band + ".Parms.pickle"  # Where results saved
    SaveObject(parms, ParmsPicklefile, True)
    EVLAAddOutFile(ParmsPicklefile, 'project', 'Processing parameters used')

    # Are we going to be doing Hanning?
    if parms["doHann"]:
        loadClass = parms["band"] + "Raw"
    else:
        loadClass = dataClass

    # Load Data from Archive directory
    if parms["doLoadArchive"]:
        uv = EVLAUVLoadArch(parms["archRoot"], EVLAAIPSName(project, session), loadClass, disk, parms["seq"], err, \
                            selConfig=parms["selConfig"], doSwPwr=parms["doSwPwr"], \
                            selBand=parms["selBand"], selChan=parms["selChan"], \
                            selNIF=parms["selNIF"], calInt=parms["calInt"], \
                            logfile=logFile, Compress=parms["Compress"], check=check, debug=debug)
        if uv == None and not check:
            raise RuntimeError("Cannot load " + parms["DataRoot"])

    # Hanning
    if parms["doHann"]:
        # Set uv if not done
        if uv == None and not check:
            uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project, session),
                            loadClass[0:6], disk, parms["seq"], True, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating AIPS data")

        uv = EVLAHann(uv, EVLAAIPSName(project, session), dataClass, disk, parms["seq"], err, \
                      doDescm=parms["doDescm"], logfile=logFile, check=check, debug=debug)
        if uv == None and not check:
            raise RuntimeError("Cannot Hann data ")

    # Set uv if not done
    if uv == None and not check:
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project, session), dataClass[0:6], \
                        disk, parms["seq"], True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating AIPS data")

    # Save file names in history
    EVLAScriptHistory(uv, scriptName, aipsSetup, parmFile, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error writing file names to history")

    # Clear any old calibration/editing
    if parms["doClearTab"]:
        mess = "Clear previous calibration"
        printMess(mess, logFile)
        EVLAClearCal(uv,
                     err,
                     doGain=parms["doClearGain"],
                     doFlag=parms["doClearFlag"],
                     doBP=parms["doClearBP"],
                     check=check)
        OErr.printErrMsg(err, "Error resetting calibration")

    # Copy FG 1 to FG 2
    if parms["doCopyFG"]:
        mess = "Copy FG 1 to FG 2"
        printMess(mess, logFile)
        retCode = EVLACopyFG(uv,
                             err,
                             logfile=logFile,
                             check=check,
                             debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Copying FG table")

    # Drop end channels of spectra?  Only if new FG 2
    if parms["doCopyFG"] and (parms["BChDrop"] > 0) or (parms["EChDrop"] > 0):
        # Channels based on original number, reduced if Hanning
        nchan = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocf"]]
        fact = parms["selChan"] / nchan  # Hanning reduction factor
        BChDrop = parms["BChDrop"] / fact
        EChDrop = parms["EChDrop"] / fact
        mess = "Trim %d channels from start and %d from end of each spectrum" % (
            BChDrop, EChDrop)
        printMess(mess, logFile)
        retCode = EVLADropChan (uv, BChDrop, EChDrop, err, flagVer=parms["editFG"], \
                                logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Copying FG table")

    # Special editing
    if parms["doEditList"] and not check:
        mess = "Special editing"
        printMess(mess, logFile)
        for edt in parms["editList"]:
            UV.PFlag(uv,err,timeRange=[dhms2day(edt["timer"][0]),dhms2day(edt["timer"][1])], \
                         flagVer=parms["editFG"], Ants=edt["Ant"], Chans=edt["Chans"], IFs=edt["IFs"], \
                         Stokes=edt["Stokes"], Reason=edt["Reason"])
            OErr.printErrMsg(err, "Error Flagging")

    # Quack to remove data from start and end of each scan
    if parms["doQuack"]:
        retCode = EVLAQuack (uv, err, begDrop=parms["quackBegDrop"], endDrop=parms["quackEndDrop"], \
                             Reason=parms["quackReason"], \
                             logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Quacking data")

    # Flag antennas shadowed by others?
    if parms["doShad"]:
        retCode = EVLAShadow (uv, err, shadBl=parms["shadBl"], \
                              logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error Shadow flagging data")

    # Median window time editing, for RFI impulsive in time
    if parms["doMedn"]:
        mess = "Median window time editing, for RFI impulsive in time:"
        printMess(mess, logFile)
        retCode = EVLAMedianFlag (uv, "    ", err, noScrat=noScrat, nThreads=nThreads, \
                                  avgTime=parms["avgTime"], avgFreq=parms["avgFreq"],  chAvg= parms["chAvg"], \
                                  timeWind=parms["timeWind"], flagVer=2,flagSig=parms["mednSigma"], \
                                  logfile=logFile, check=check, debug=False)
        if retCode != 0:
            raise RuntimeError("Error in MednFlag")

    # Median window frequency editing, for RFI impulsive in frequency
    if parms["doFD1"]:
        mess = "Median window frequency editing, for RFI impulsive in frequency:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, "    ", err,  flagVer=2, doCalib=-1, doBand=-1,   \
                                timeAvg=parms["FD1TimeAvg"], \
                                doFD=True, FDmaxAmp=1.0e20, FDmaxV=1.0e20, FDwidMW=parms["FD1widMW"],  \
                                FDmaxRMS=[1.0e20,0.1], FDmaxRes=parms["FD1maxRes"],  \
                                FDmaxResBL= parms["FD1maxRes"],  \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in AutoFlag")

    # RMS/Mean editing for calibrators
    if parms["doRMSAvg"]:
        mess = "RMS/Mean editing for calibrators:"
        printMess(mess, logFile)
        clist = []  # Calibrator list
        for s in parms["ACals"]:
            if s['Source'] not in clist:
                clist.append(s['Source'])
        for s in parms["PCals"]:
            if s['Source'] not in clist:
                clist.append(s['Source'])
        for s in parms["DCals"]:
            if s['Source'] not in clist:
                clist.append(s['Source'])
        retCode = EVLAAutoFlag (uv, clist, err,  flagVer=2, doCalib=-1, doBand=-1,   \
                                    RMSAvg=parms["RMSAvg"], timeAvg=parms["RMSTimeAvg"], \
                                    nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in AutoFlag")

    # Need to find a reference antenna?  See if we have saved it?
    if (parms["refAnt"] <= 0):
        refAnt = FetchObject(project + "_" + session + "_" + band +
                             ".refAnt.pickle")
        if refAnt:
            parms["refAnt"] = refAnt
    # Use bandpass calibrator and center half of each spectrum
    if parms["refAnt"] <= 0:
        mess = "Find best reference antenna: run Calib on BP Cal(s) "
        printMess(mess, logFile)
        parms["refAnt"] = EVLAGetRefAnt(uv, parms["BPCals"], err, flagVer=2, \
                                        solInt=parms["bpsolint1"], nThreads=nThreads, \
                                        logfile=logFile, check=check, debug=debug)
        if err.isErr:
            raise RuntimeError("Error finding reference antenna")
        if parms["refAnts"][0] <= 0:
            parms["refAnts"][0] = parms["refAnt"]
        mess = "Picked reference antenna " + str(parms["refAnt"])
        printMess(mess, logFile)
        # Save it
        ParmsPicklefile = project + "_" + session + "_" + band + ".Parms.pickle"  # Where results saved
        SaveObject(parms, ParmsPicklefile, True)
        refAntPicklefile = project + "_" + session + "_" + band + ".refAnt.pickle"  # Where results saved
        SaveObject(parms["refAnt"], refAntPicklefile, True)

    # Plot Raw, edited data?
    if parms["doRawSpecPlot"] and parms["plotSource"]:
        mess = "Raw Spectral plot for: " + parms["plotSource"]
        printMess(mess, logFile)
        plotFile = "./" + fileRoot + "RawSpec.ps"
        retCode = EVLASpectrum(uv, parms["plotSource"], parms["plotTime"], plotFile, parms["refAnt"], err, \
                               Stokes=["RR","LL"], doband=-1,          \
                               check=check, debug=debug, logfile=logFile )
        if retCode != 0:
            raise RuntimeError("Error in Plotting spectrum")
        EVLAAddOutFile(plotFile, 'project', 'Raw spectral plot')

    # delay calibration
    if parms["doDelayCal"] and parms["DCals"] and not check:
        plotFile = "./" + fileRoot + "DelayCal.ps"
        retCode = EVLADelayCal(uv, parms["DCals"], err,  \
                               BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                               doCalib=2, flagVer=2, doBand=-1, \
                               solInt=parms["delaySolInt"], smoTime=1.0/60.0,  \
                               refAnts=[parms["refAnt"]], doTwo=parms["doTwo"],
                               doZeroPhs=parms["delayZeroPhs"], \
                               doPlot=parms["doSNPlot"], plotFile=plotFile, \
                               nThreads=nThreads, noScrat=noScrat, \
                               logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in delay calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and parms["plotSource"]:
            plotFile = "./" + fileRoot + "DelaySpec.ps"
            retCode = EVLASpectrum(uv, parms["plotSource"], parms["plotTime"], \
                                   plotFile, parms["refAnt"], err, \
                                   Stokes=["RR","LL"], doband=-1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode != 0:
                raise RuntimeError("Error in Plotting spectrum")

    # Bandpass calibration
    if parms["doBPCal"] and parms["BPCals"]:
        retCode = EVLABPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                            solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                            BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                            BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                            doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                            UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=2, doPlot=False, \
                            nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in Bandpass calibration")

        # Plot corrected data?
        if parms["doSpecPlot"] and parms["plotSource"]:
            plotFile = "./" + fileRoot + "BPSpec.ps"
            retCode = EVLASpectrum(uv, parms["plotSource"], parms["plotTime"], plotFile, \
                                   parms["refAnt"], err, Stokes=["RR","LL"], doband=1,          \
                                   check=check, debug=debug, logfile=logFile )
            if retCode != 0:
                raise RuntimeError("Error in Plotting spectrum")

    # Constrain X/Y gain
    if parms["doXYRelGain"] and (parms["XYRelGainCal"] != None):
        mess = "Calibrate X/Y relative gain:"
        printMess(mess, logFile)
        plotFile = "./" + fileRoot + "XYGainCal.ps"
        retCode = EVLACalAP (uv, [], parms["XYRelGainCal"], err,
                             doCalib=2, doBand=1, BPVer=1, flagVer=2, flagFail=False,  \
                             BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                             solInt=parms["solInt"], solSmo=1440., ampScalar=parms["ampScalar"], \
                             doPlot=parms["doSNPlot"], plotFile=plotFile,  refAnt=parms["refAnt"], \
                             nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error calibrating X/Y gain")
        parms["avgPol"] = True
        # Fix X/Y gain

    # Amp & phase Calibrate
    if parms["doAmpPhaseCal"]:
        plotFile = "./" + fileRoot + "APCal.ps"
        retCode = EVLACalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"],
                             doCalib=2, doBand=1, BPVer=1, flagVer=2, avgPol=parms["avgPol"],  \
                             BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                             solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                             doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \
                             ampEditFG=parms["ampEditFG"], \
                             doPlot=parms["doSNPlot"], plotFile=plotFile,  refAnt=parms["refAnt"], \
                             nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error calibrating")

    # More editing
    if parms["doAutoFlag"]:
        mess = "Post calibration editing:"
        printMess(mess, logFile)
        # if going to redo then only calibrators
        if parms["doRecal"]:
            # Only calibrators
            clist = []
            for DCal in parms["DCals"]:
                if DCal["Source"] not in clist:
                    clist.append(DCal["Source"])
            for PCal in parms["PCals"]:
                if PCal["Source"] not in clist:
                    clist.append(PCal["Source"])
            for ACal in parms["ACals"]:
                if ACal["Source"] not in clist:
                    clist.append(ACal["Source"])
        else:
            clist = []

        retCode = EVLAAutoFlag (uv, clist, err, flagVer=2, \
                                doCalib=2, gainUse=0, doBand=1, BPVer=1,  \
                                IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                doFD=parms["doAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                FDmaxRes=parms["FDmaxRes"],  FDmaxResBL=parms["FDmaxResBL"], \
                                FDbaseSel=parms["FDbaseSel"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in AutoFlag")

    # Redo the calibration using new flagging?
    if parms["doBPCal2"] == None:
        parms["doBPCal2"] = parms["doBPCal"]
    if parms["doDelayCal2"] == None:
        parms["doDelayCal2"] = parms["doDelayCal2"]
    if parms["doXYRelGain2"] == None:
        parms["doXYRelGain2"] = parms["doXYRelGain"]
    if parms["doAmpPhaseCal2"] == None:
        parms["doAmpPhaseCal2"] = parms["doAmpPhaseCal"]
    if parms["doAutoFlag2"] == None:
        parms["doAutoFlagCal2"] = parms["doAutoFlag"]
    if parms["doRecal"]:
        mess = "Redo calibration:"
        printMess(mess, logFile)
        EVLAClearCal(uv,
                     err,
                     doGain=True,
                     doFlag=False,
                     doBP=True,
                     check=check,
                     logfile=logFile)
        OErr.printErrMsg(err, "Error resetting calibration")
        # Delay recalibration
        if parms["doDelayCal2"] and parms["DCals"] and not check:
            plotFile = "./" + fileRoot + "DelayCal2.ps"
            retCode = EVLADelayCal(uv, parms["DCals"], err, \
                                   BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
                                   doCalib=2, flagVer=2, doBand=-1, \
                                   solInt=parms["delaySolInt"], smoTime=1.0/60.0,  \
                                   refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], \
                                   doZeroPhs=parms["delayZeroPhs"], \
                                   doPlot=parms["doSNPlot"], plotFile=plotFile, \
                                   nThreads=nThreads, noScrat=noScrat, \
                                   logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error in delay calibration")

            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = "./" + fileRoot + "DelaySpec2.ps"
                retCode = EVLASpectrum(uv, parms["plotSource"], parms["plotTime"], plotFile, parms["refAnt"], err, \
                                       Stokes=["RR","LL"], doband=-1,          \
                                       check=check, debug=debug, logfile=logFile )
                if retCode != 0:
                    raise RuntimeError("Error in Plotting spectrum")

        # Bandpass calibration
        if parms["doBPCal2"] and parms["BPCals"]:
            retCode = EVLABPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
                                    solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
                                    BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
                                    BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
                                    doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
                                    UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=2, doPlot=False, \
                                    nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error in Bandpass calibration")

            # Plot corrected data?
            if parms["doSpecPlot"] and parms["plotSource"]:
                plotFile = "./" + fileRoot + "BPSpec2.ps"
                retCode = EVLASpectrum(uv, parms["plotSource"], parms["plotTime"], plotFile, parms["refAnt"], err, \
                                           Stokes=["RR","LL"], doband=1,          \
                                           check=check, debug=debug, logfile=logFile )
                if retCode != 0:
                    raise RuntimeError("Error in Plotting spectrum")

        # Constrain X/Y gain
        parms["avgPol"] = False
        # Fix X/Y gain?
        if parms["doXYRelGain2"] and (parms["XYRelGainCal"] != None):
            mess = "Calibrate X/Y relative gain:"
            printMess(mess, logFile)
            plotFile = "./" + fileRoot + "XYGainCal2.ps"
            retCode = EVLACalAP (uv, [], parms["XYRelGainCal"], err,
                                 doCalib=2, doBand=1, BPVer=1, flagVer=2, timeRange=parms["XYRelGainCalTime"], \
                                     BChan=parms["ampBChan"], EChan=parms["ampEChan"], flagFail=False, \
                                     solInt=parms["solInt"], solSmo=1440., ampScalar=parms["ampScalar"], \
                                     doPlot=parms["doSNPlot"], plotFile=plotFile,  refAnt=parms["refAnt"], \
                                     nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error calibrating X/Y gain")
            parms["avgPol"] = True
            # Fix X/Y gain

        # Amp & phase Recalibrate
        if parms["doAmpPhaseCal2"]:
            plotFile = "./" + fileRoot + "APCal2.ps"
            retCode = EVLACalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], \
                                 doCalib=2, doBand=1, BPVer=1, flagVer=2,avgPol=parms["avgPol"],  \
                                 BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
                                 solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
                                 doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \
                                 ampEditFG=parms["ampEditFG"], \
                                 doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \
                                 noScrat=noScrat, nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error calibrating")

        # More editing
        if parms["doAutoFlag2"]:
            mess = "Post recalibration editing:"
            printMess(mess, logFile)
            retCode = EVLAAutoFlag (uv, [], err, flagVer=2, \
                                    doCalib=2, gainUse=0, doBand=1, BPVer=1,  \
                                    IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
                                    doFD=parms["doAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
                                    FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
                                    FDmaxRes=parms["FDmaxRes"],  FDmaxResBL= parms["FDmaxResBL"], \
                                    FDbaseSel=parms["FDbaseSel"], \
                                    nThreads=nThreads, logfile=logFile, check=check, debug=debug)
            if retCode != 0:
                raise RuntimeError("Error in AutoFlag")

    # end recal

    # Calibrate and average data
    if parms["doCalAvg"]:
        retCode = EVLACalAvg (uv, avgClass, parms["seq"], parms["CalAvgTime"], err, \
                              flagVer=2, doCalib=2, gainUse=0, doBand=1, BPVer=1, doPol=False, \
                              avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], \
                              BChan=parms["CABChan"], EChan=parms["CAEChan"], \
                              BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
                              nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in CalAvg")

    # Get calibrated/averaged data
    if not check:
        uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project, session), avgClass[0:6], \
                        disk, parms["seq"], True, err)
        if err.isErr:
            OErr.printErrMsg(err, "Error creating cal/avg AIPS data")

    # XClip
    if parms["XClip"] and parms["XClip"] > 0.0:
        mess = "Cross Pol clipping:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, [], err, flagVer=-1, flagTab=1, \
                                doCalib=2, gainUse=0, doBand=-1, maxBad=1.0,  \
                                XClip=parms["XClip"], timeAvg=1./60., \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in AutoFlag")

    # R-L  delay calibration cal if needed,
    if parms["doRLDelay"] and parms["RLDCal"][0][0] != None:
        if parms["rlrefAnt"] <= 0:
            parms["rlrefAnt"] = parms["refAnt"]
        # parms["rlDoBand"] if before average, BPVer=parms["rlBPVer"],
        retCode = EVLARLDelay(uv, err,\
                              RLDCal=parms["RLDCal"], BChan=parms["rlBChan"], \
                              EChan=parms["rlEChan"], UVRange=parms["rlUVRange"], \
                              soucode=parms["rlCalCode"], doCalib=parms["rlDoCal"], gainUse=parms["rlgainUse"], \
                              timerange=parms["rltimerange"], \
                              # NOT HERE doBand=parms["rlDoBand"], BPVer=parms["rlBPVer"],  \
                              flagVer=parms["rlflagVer"], \
                              refAnt=parms["rlrefAnt"], doPol=False,  \
                              nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
                              check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in R-L delay calibration")

    # Polarization calibration
    if parms["doPolCal"]:
        if parms["PCRefAnt"] <= 0:
            parms["PCRefAnt"] = parms["refAnt"]
        retCode = EVLAPolCal(uv, parms["PCInsCals"], err, \
                             doCalib=2, gainUse=0, doBand=-1, flagVer=0, \
                             fixPoln=parms["PCFixPoln"], pmodel=parms["PCpmodel"], avgIF=parms["PCAvgIF"], \
                             solInt=parms["PCSolInt"], refAnt=parms["PCRefAnt"], solType=parms["PCSolType"], \
                             ChInc=parms["PCChInc"], ChWid=parms["PCChWid"], \
                             nThreads=nThreads, check=check, debug=debug, noScrat=noScrat, logfile=logFile)
        if retCode != 0 and (not check):
            raise RuntimeError("Error in polarization calibration: " +
                               str(retCode))
        # end poln cal.

    # R-L phase calibration cal., creates new BP table
    if parms["doRLCal"] and parms["RLDCal"][0][0] != None:
        plotFile = "./" + fileRoot + "RLSpec2.ps"
        if parms["rlrefAnt"] <= 0:
            parms["rlrefAnt"] = parms["refAnt"]
        retCode = EVLARLCal(uv, err,\
                            RLDCal=parms["RLDCal"], BChan=parms["rlBChan"],
                            EChan=parms["rlEChan"], UVRange=parms["rlUVRange"], \
                            ChWid2=parms["rlChWid"], solInt1=parms["rlsolint1"], solInt2=parms["rlsolint2"], \
                            RLPCal=parms["RLPCal"], RLPhase=parms["RLPhase"], \
                            RM=parms["RLRM"], CleanRad=parms["rlCleanRad"], \
                            calcode=parms["rlCalCode"], doCalib=parms["rlDoCal"], gainUse=parms["rlgainUse"], \
                            timerange=parms["rltimerange"], FOV=parms["rlFOV"], \
                            doBand=-1, BPVer=1, flagVer=parms["rlflagVer"], \
                            refAnt=parms["rlrefAnt"], doPol=parms["doPol"], PDVer=parms["PDVer"],  \
                            doPlot=parms["doSpecPlot"], plotFile=plotFile, \
                            nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
                            check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in RL phase spectrum calibration")

    # VClip
    if parms["VClip"] and parms["VClip"] > 0.0:
        mess = "VPol clipping:"
        printMess(mess, logFile)
        retCode = EVLAAutoFlag (uv, [], err, flagVer=-1, flagTab=1, \
                                doCalib=2, gainUse=0, doBand=-1,  \
                                VClip=parms["VClip"], timeAvg=parms["timeAvg"], \
                                nThreads=nThreads, logfile=logFile, check=check, debug=debug)
        if retCode != 0:
            raise RuntimeError("Error in AutoFlag VClip")

    # Plot corrected data?
    if parms["doSpecPlot"] and parms["plotSource"]:
        plotFile = "./" + fileRoot + "Spec.ps"
        retCode = EVLASpectrum(uv, parms["plotSource"], parms["plotTime"], \
                               plotFile, parms["refAnt"], err, \
                               Stokes=["RR","LL"], doband=-1,          \
                               check=check, debug=debug, logfile=logFile )
        if retCode != 0:
            raise RuntimeError("Error in Plotting spectrum")

    # Image targets
    if parms["doImage"]:
        # If targets not specified, image all
        if len(parms["targets"]) <= 0:
            slist = EVLAAllSource(uv,
                                  err,
                                  logfile=logFile,
                                  check=check,
                                  debug=debug)
        else:
            slist = parms["targets"]
        EVLAImageTargets (uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, \
                          doCalib=2, doBand=1,  flagVer=1, doPol=parms["doPol"], PDVer=parms["PDVer"],  \
                          Stokes=parms["Stokes"], FOV=parms["FOV"], Robust=parms["Robust"], Niter=parms["Niter"], \
                          CleanRad=parms["CleanRad"], minFlux=parms["minFlux"], \
                          maxPSCLoop=parms["maxPSCLoop"], minFluxPSC=parms["minFluxPSC"], \
                          solPInt=parms["solPInt"], solPMode=parms["solPMode"], solPType=parms["solPType"], \
                          maxASCLoop=parms["maxASCLoop"], minFluxASC=parms["minFluxASC"], \
                          solAInt=parms["solAInt"], solAMode=parms["solAMode"], solAType=parms["solAType"], \
                          avgPol=parms["avgPol"], avgIF=parms["avgIF"], minSNR = 4.0, refAnt=parms["refAnt"], \
                          do3D=parms["do3D"], BLFact=parms["BLFact"], BLchAvg=parms["BLchAvg"], \
                          doMB=parms["doMB"], norder=parms["MBnorder"], maxFBW=parms["MBmaxFBW"], \
                          PBCor=parms["PBCor"],antSize=parms["antSize"], Beam=parms["Beam"], \
                          nTaper=parms["nTaper"], Tapers=parms["Tapers"], \
                          nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
        # End image

    # Get report on sources
    if parms["doReport"]:
        # If targets not specified, do all
        if len(parms["targets"]) <= 0:
            slist = EVLAAllSource(uv,
                                  err,
                                  logfile=logFile,
                                  check=check,
                                  debug=debug)
        else:
            slist = parms["targets"]
        Report = EVLAReportTargets(uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, \
                                       Stokes=parms["Stokes"], logfile=logFile, check=check, debug=debug)
        # Save to pickle jar
        ReportPicklefile = "./" + fileRoot + "Report.pickle"  # Where results saved
        SaveObject(Report, ReportPicklefile, True)

    # Write results, cleanup
    # Save cal/average UV data?
    if parms["doSaveUV"] and (not check):
        Aname = EVLAAIPSName(project, session)
        cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV",
                               parms["seq"], err)
        if cno > 0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass, disk,
                             parms["seq"], True, err)
            filename = parms["project"] + parms["session"] + parms["band"] + "Cal.uvtab"
            fuv = EVLAUVFITS(uvt,
                             filename,
                             0,
                             err,
                             compress=parms["Compress"],
                             logfile=logFile)
            EVLAAddOutFile(filename, 'project', "Calibrated Averaged UV data")
            # Save list of output files
            EVLASaveOutFiles()
            del uvt
    # Save raw UV data tables?
    if parms["doSaveTab"] and (not check):
        Aname = EVLAAIPSName(project, session)
        cno = AIPSDir.PTestCNO(disk, user, Aname, dataClass[0:6], "UV",
                               parms["seq"], err)
        if cno > 0:
            uvt = UV.newPAUV("AIPS RAW UV DATA", Aname, dataClass[0:6], disk,
                             parms["seq"], True, err)
            filename = parms["project"] + parms["session"] + parms["band"] + "CalTab.uvtab"
            fuv = EVLAUVFITSTab(uvt, filename, 0, err, logfile=logFile)
            EVLAAddOutFile(filename, 'project', "Calibrated AIPS tables")
            del uvt
            # Write History
            filename = project + '_' + session + '_' + band + ".History.text"
            OTObit.PrintHistory(uv, file=filename)
            EVLAAddOutFile(filename, 'project',
                           "Processing history of calibrated data")
            # Save list of output files
            EVLASaveOutFiles()
    # Imaging results
    # If targets not specified, save all
    if len(parms["targets"]) <= 0:
        slist = EVLAAllSource(uv,
                              err,
                              logfile=logFile,
                              check=check,
                              debug=debug)
    else:
        slist = parms["targets"]
    for target in slist:
        if parms["doSaveImg"] and (not check):
            for s in parms["Stokes"]:
                oclass = s + outIClass[1:]
                outname = target
                # Test if image exists
                cno = AIPSDir.PTestCNO(disk, user, outname, oclass, "MA",
                                       parms["seq"], err)
                if cno <= 0:
                    continue
                x = Image.newPAImage("out", outname, oclass, disk,
                                     parms["seq"], True, err)
                outfile = "./" + fileRoot + target + "." + oclass + ".fits"
                xf = EVLAImFITS(x, outfile, 0, err, logfile=logFile)
                EVLAAddOutFile(outfile, target, 'Image of ' + target)
                # Statistics
                zz = imstat(x, err, logfile=logFile)
    # end writing loop

    # Save list of output files
    EVLASaveOutFiles()
    OErr.printErrMsg(err, "Writing output")

    # Contour plots
    if parms["doKntrPlots"]:
        mess = "INFO --> Contour plots (doKntrPlots)"
        printMess(mess, logFile)
        EVLAKntrPlots(err,
                      imName=parms["targets"],
                      project=project,
                      session=session,
                      band=band,
                      disk=disk,
                      debug=debug)
        # Save list of output files
        EVLASaveOutFiles()
    elif debug:
        mess = "Not creating contour plots ( doKntrPlots = " + str(
            parms["doKntrPlots"]) + " )"
        printMess(mess, logFile)

    # Source uv plane diagnostic plots
    if parms["doDiagPlots"]:
        mess = "INFO --> Diagnostic plots (doDiagPlots)"
        printMess(mess, logFile)
        # Get the highest number avgClass catalog file
        Aname = EVLAAIPSName(project, session)
        uvc = None
        if not check:
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True,
                             err)
        EVLADiagPlots( uvc, err, cleanUp=parms["doCleanup"], \
                           project=project, session=session, band=band, \
                           logfile=logFile, check=check, debug=debug )
        # Save list of output files
        EVLASaveOutFiles()
    elif debug:
        mess = "Not creating diagnostic plots ( doDiagPlots = " + str(
            parms["doDiagPlots"]) + " )"
        printMess(mess, logFile)

    # Save metadata
    srcMetadata = None
    projMetadata = None
    if parms["doMetadata"]:
        mess = "INFO --> Save metadata (doMetadata)"
        printMess(mess, logFile)
        uvc = None
        if not uvc:
            # Get calibrated/averaged data
            Aname = EVLAAIPSName(project, session)
            uvname = project + "_" + session + "_" + band + "_Cal"
            uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True,
                             err)
            if err.isErr:
                OErr.printErrMsg(err, "Error creating cal/avg AIPS data")

        # Get source metadata; save to pickle file
        srcMetadata = EVLASrcMetadata( uvc, err, Sources=parms["targets"], seq=parms["seq"], \
                                       sclass=outIClass, Stokes=parms["Stokes"],\
                                       logfile=logFile, check=check, debug=debug )
        picklefile = "./" + fileRoot + ".SrcReport.pickle"
        SaveObject(srcMetadata, picklefile, True)
        EVLAAddOutFile(picklefile, 'project', 'All source metadata')

        # Get project metadata; save to pickle file
        projMetadata = EVLAProjMetadata( uvc, AIPS_VERSION, err, \
            PCals=parms["PCals"], ACals=parms["ACals"], \
            BPCals=parms["BPCals"], DCals=parms["DCals"], \
            project = project, session = session, band = band, \
            dataInUVF = parms["archRoot"], archFileID = 66666 )
        picklefile = "./" + fileRoot + ".ProjReport.pickle"
        SaveObject(projMetadata, picklefile, True)
        EVLAAddOutFile(picklefile, 'project', 'Project metadata')
    else:
        # Fetch from pickle jar
        picklefile = "./" + fileRoot + ".SrcReport.pickle"
        srcMetadata = FetchObject(picklefile)
        picklefile = "./" + fileRoot + ".ProjReport.pickle"
        projMetadata = FetchObject(picklefile)

    # Write report
    if parms["doHTML"]:
        mess = "INFO --> Write HTML report (doHTML)"
        printMess(mess, logFile)
        EVLAHTMLReport( projMetadata, srcMetadata, \
                            outfile="./"+fileRoot+".report.html", \
                            logFile=logFile )

    # Write VOTable
    if parms["doVOTable"]:
        mess = "INFO --> Write VOTable (doVOTable)"
        printMess(mess, logFile)
        EVLAAddOutFile('VOTable.xml', 'project', 'VOTable report')
        EVLAWriteVOTable(projMetadata, srcMetadata, filename='VOTable.xml')

    # Save list of output files
    EVLASaveOutFiles()

    # Cleanup - delete AIPS files
    if parms["doCleanup"] and (not check):
        mess = "INFO --> Clean up (doCleanup)"
        printMess(mess, logFile)
        # Delete target images
        # How many Stokes images
        nstok = len(parms["Stokes"])
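        # One image class per Stokes: the Stokes letter replaces the first
        # character of outIClass (e.g. 'Q' + outIClass[1:])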
        for istok in range(0, nstok):
            oclass = parms["Stokes"][istok:istok + 1] + outIClass[1:]
            AllDest(err, disk=disk, Aseq=parms["seq"], Aclass=oclass)

        # Delete initial UV data
        Aname = EVLAAIPSName(project, session)
        # Test if data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, dataClass[0:6], "UV",
                               parms["seq"], err)
        if cno > 0:
            uvt = UV.newPAUV("AIPS RAW UV DATA", Aname, dataClass[0:6], disk,
                             parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting raw AIPS data")
        # Zap calibrated/averaged data
        # Test if data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV",
                               parms["seq"], err)
        if cno > 0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass[0:6], disk,
                             parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
        # Zap UnHanned data if present
        loadClass = parms["band"] + "Raw"
        # Test if unHanned data exists
        cno = AIPSDir.PTestCNO(disk, user, Aname, loadClass[0:6], "UV",
                               parms["seq"], err)
        if cno > 0:
            uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, loadClass[0:6], disk,
                             parms["seq"], True, err)
            uvt.Zap(err)
            del uvt
            if err.isErr:
                OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
        OErr.printErrMsg(err, "Writing output/cleanup")

    # Shutdown
    mess = "Finished project "+parms["project"]+" session "+parms["session"]+ \
    " "+parms["band"]+" Band"+" AIPS user no. "+str(AIPS.userno)
    printMess(mess, logFile)
    OErr.printErr(err)
    OSystem.Shutdown(ObitSys)
Exemplo n.º 30
0
def newPAImage(name, Aname, Aclass, disk, seq, exists, err, verbose=False):
    """
    Create and initialize an AIPS based Image structure
    
    Create, set initial access information (full image, plane at a time)
    and, if exists is True, verify the file.
    Returns the Python Image object
    isOK member set to indicate success

    * name     = name desired for object (labeling purposes)
    * Aname    = AIPS name of file
    * Aclass   = AIPS class of file
    * disk     = AIPS disk number of file
    * seq      = AIPS sequence number of file
    * exists   = if true then the file is opened and closed to verify
    * err      = Python Obit Error/message stack
    * verbose  = If True, print any error messages; else suppress them
    """
    ################################################################
    out = ImageMF(name)
    out.isOK = True  # until proven otherwise
    cno = -1
    user = OSystem.PGetAIPSuser()
    # print "disk, aseq", disk, seq
    # Does it really previously exist?
    test = AIPSDir.PTestCNO(disk, user, Aname, Aclass, "MA", seq, err)
    out.exist = test > 0
    if exists:  # If user thinks file exists...
        if out.exist:  # If file is defined in catalog -> verify that file exists
            OErr.PLog(err, OErr.Info, Aname + " image found. Now verifying...")
            if verbose: OErr.printErr(err)
            cno = AIPSDir.PFindCNO(disk, user, Aname, Aclass, "MA", seq, err)
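            # blc/trc are assumed to be module-level defaults selecting the full image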
            Obit.ImageMFSetAIPS(out.me, 2, disk, cno, user, blc, trc, err.me)
            Obit.ImagefullInstantiate(out.cast("ObitImage"), 1, err.me)
            #print "found",Aname,Aclass,"as",cno
        else:  # If file not defined in catalog -> error
            OErr.PLog(err, OErr.Error, Aname + " image does not exist")
            out.isOK = False
    else:  # exists=False
        # Create new image entry in catalog; if image already defined, this
        # has no effect
        OErr.PLog(err, OErr.Info,
                  "Creating new image: " + Aname + ", " + Aclass)
        if verbose: OErr.printErr(err)
        cno = AIPSDir.PAlloc(disk, user, Aname, Aclass, "MA", seq, err)
        Obit.ImageMFSetAIPS(out.me, 2, disk, cno, user, blc, trc, err.me)
        #print "assigned",Aname,Aclass,"to",cno

    # show any errors if wanted
    if verbose and err.isErr:
        out.isOK = False
        OErr.printErrMsg(err, "Error creating AIPS Image object")
    elif err.isErr:
        out.isOK = False
        OErr.PClear(err)  # Clear unwanted messages
    else:
        OErr.PClear(err)  # Clear non-error messages

    # Did it work?
    if not out.isOK:
        return out

    # Add File info
    out.FileType = 'AIPS'
    out.Disk = disk
    out.Aname = Aname
    out.Aclass = Aclass
    out.Aseq = seq
    out.Otype = "Image"
    out.Acno = cno
    return out  # seems OK
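
# A minimal usage sketch (not part of the routine above): the AIPS name, class,
# disk and seq below are placeholder values, and err is assumed to be an OErr
# created at Obit startup.  exists=True verifies the catalog entry; the isOK
# member reports failure without raising.
x = newPAImage("out", "MYIMAGE", "ICLEAN", 1, 1, True, err, verbose=True)
if not x.isOK:
    OErr.printErrMsg(err, "Could not attach to AIPS image")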
Exemplo n.º 31
0
OErr.printErrMsg(err, "Error with Obit startup")

# For debugging
#print sys.argv
#Obit.Bomb()

# Get file names
inFile = sys.argv[1]
pntFile = sys.argv[2]
outFile = sys.argv[3]
inDisk = 1
outDisk = 1

# Set data
inImage = Image.newPImage("Input image", inFile, inDisk, 1, err)
pntImage = Image.newPImage("Pointing image", pntFile, inDisk, 1, err)
outImage = Image.newPImage("Output image", outFile, outDisk, 0, err)
Image.PClone(inImage, outImage, err)  # Same structure etc.
OErr.printErrMsg(err, "Error initializing")

# do it - defaulting plane, antenna size
ImageUtil.PPBApply(inImage, pntImage, outImage, err)
OErr.printErrMsg(err, "Error correcting image")

# Say something
print "PB applied to", inFile, "writing", outFile, ", using pointing from", pntFile

# Shutdown Obit
OErr.printErr(err)
del ObitSys
Exemplo n.º 32
0
def ConvertKATData(outUV, katdata, meta, err, stop_w=False, timeav=1):
    """
    Read KAT HDF data and write Obit UV

     * outUV    = Obit UV object
     * katdata  = input KAT dataset
     * meta     = dict with data metadata
     * err      = Python Obit Error/message stack
     * stop_w   = if True, use the W term of the UVW coordinates to stop the fringes
     * timeav   = time-averaging factor (number of integrations to average; 1 = none)
    """
    ################################################################
    reffreq = meta["spw"][0][1]  # reference frequency
    lamb = 2.99792458e8 / reffreq  # wavelength of reference freq
    nchan = meta["spw"][0][0]  # number of channels
    nif = len(meta["spw"])  # Number of IFs
    nstok = meta["nstokes"]  # Number of Stokes products
    p = meta["products"]  # baseline stokes indices
    nprod = len(p)  # number of correlations/baselines
    ants = meta["ants"]
    antslookup = {}
    for ant in katdata.ants:
        antslookup[ant.name] = ant
    # work out Start time in unix sec
    tm = katdata.timestamps[1:2]
    tx = time.gmtime(tm[0])
    time0 = tm[0] - tx[3] * 3600.0 - tx[4] * 60.0 - tx[5]
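    # time0 is 0h UT (Unix seconds) on the day of the reference timestamp;
    # visibility times below are written as fractional days relative to it.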

    # Set data to read one vis per IO
    outUV.List.set("nVisPIO", 1)

    # Open data
    zz = outUV.Open(UV.READWRITE, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error opening output UV")
    # visibility record offsets
    d = outUV.Desc.Dict
    ilocu = d['ilocu']
    ilocv = d['ilocv']
    ilocw = d['ilocw']
    iloct = d['iloct']
    ilocb = d['ilocb']
    ilocsu = d['ilocsu']
    nrparm = d['nrparm']
    jlocc = d['jlocc']
    jlocs = d['jlocs']
    jlocf = d['jlocf']
    jlocif = d['jlocif']
    naxes = d['inaxes']
    count = 0.0
    visno = 0
    # Get IO buffers as numpy arrays
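    # VisBuf is a raw byte buffer; 4 bytes per float32 word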
    shape = len(outUV.VisBuf) / 4
    buffer = numpy.frombuffer(outUV.VisBuf, dtype=numpy.float32, count=shape)

    # Template vis
    vis = outUV.ReadVis(err, firstVis=1)
    first = True
    firstVis = 1
    numflags = 0
    numvis = 0
    # Do we need to stop Fringes
    if stop_w:
        msg = "W term in UVW coordinates will be used to stop the fringes."
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print msg
    for scan, state, target in katdata.scans():
        # Fetch data - may blow core
        try:
            tm = katdata.timestamps
            vs = katdata.vis
            wt = katdata.weights
            #fg = katdata.flags
            fg = numpy.zeros_like(wt, dtype=numpy.bool)
        except:
            # Damn, save what you've got
            msg = "Blew core - try to recover"
            OErr.PLog(err, OErr.Info, msg)
            OErr.printErr(err)
            print msg
            outUV.Close(err)
            zz = outUV.Open(UV.READWRITE, err)
            if err.isErr:
                OErr.printErrMsg(err, "Error closing/reopening data")
                return
            continue
        if timeav > 1:
            vs, wt, fg, tm = AverageTime(vs, wt, fg, tm, int(timeav))
            #Lets average the data!!!
            #vs,wt,fg,tm,cf=averager.average_visibilities(vs,wt,fg,tm,katdata.channel_freqs,timeav=int(timeav),chanav=1,flagav=True)
        nint = len(tm)
        #Get target suid
        # Only process targets in the input list
        try:
            suid = meta["targLookup"][target.name[0:16]]
        except:
            continue
        # Negate the weights that are online flagged (ie. apply the online flags here)
        #wt[numpy.where(fg)]=-32767.
        numflags += numpy.sum(fg)
        numvis += fg.size
        uu = numpy.zeros((
            len(tm),
            katdata.shape[2],
        ), dtype=numpy.float64)
        vv = numpy.zeros_like(uu)
        ww = numpy.zeros_like(uu)
        for num, corr_prod in enumerate(katdata.corr_products):
            uvw_coordinates = numpy.array(
                target.uvw(antslookup[corr_prod[0][:4]],
                           timestamp=tm,
                           antenna=antslookup[corr_prod[1][:4]]))
            uu[:, num] = uvw_coordinates[0]
            vv[:, num] = uvw_coordinates[1]
            ww[:, num] = uvw_coordinates[2]
        # Number of integrations
        msg = "Scan:%4d Int: %4d %16s Start %s" % (
            scan, nint, target.name, day2dhms((tm[0] - time0) / 86400.0)[0:12])
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
        print msg
        # Loop over integrations
        for iint in range(0, nint):
            vsdump = vs[iint:iint + 1]
            wtdump = wt[iint:iint + 1]
            #loop over baselines
            for ibase in itertools.combinations_with_replacement(ants, 2):
                #loop over polarisations
                for istok in range(0, nstok):
                    icorrprod = (ibase[0][0], ibase[1][0], istok)
                    iprod = p.index(icorrprod)
                    thisvis = vsdump[0, :, iprod:iprod + 1]
                    thiswt = wtdump[0, :, iprod:iprod + 1]
                    #thisw=ww[iint:iint+1,iprod]
                    # Fringe stop the data if necessary
                    #if stop_w:
                    #    thisvis=StopFringes(thisvis[:,:,0],katdata.channel_freqs,thisw,katdata.corr_products[iprod])
                    # Copy slices
                    indx = nrparm + (p[iprod][2]) * 3
                    buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                           3] = thisvis.real.flatten()
                    indx += 1
                    buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                           3] = thisvis.imag.flatten()
                    indx += 1
                    buffer[indx:indx + (nchan + 1) * nstok * 3:nstok *
                           3] = thiswt.flatten()
                    # Write if Stokes index >= next or the last
                    #if (iprod==nprod-1) or (p[iprod][2]>=p[iprod+1][2]):
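                # (u,v,w are identical for all polarisation products of a baseline,
                #  so the iprod left over from the last Stokes pass suffices here)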
                # Random parameters
                buffer[ilocu] = uu[iint][iprod] / lamb
                buffer[ilocv] = vv[iint][iprod] / lamb
                buffer[ilocw] = ww[iint][iprod] / lamb
                buffer[iloct] = (tm[iint] - time0) / 86400.0  # Time in days
                buffer[ilocb] = p[iprod][0] * 256.0 + p[iprod][1]
                buffer[ilocsu] = suid
                outUV.Write(err, firstVis=visno)
                visno += 1
                buffer[3] = -3.14159
                #print visno,buffer[0:5]
                firstVis = None  # Only once
                # initialize visibility
                first = True
        # end loop over integrations
        if err.isErr:
            OErr.printErrMsg(err, "Error writing data")
    # end loop over scan
    if numvis > 0:
        msg = "Applied %s online flags to %s visibilities (%.3f%%)" % (
            numflags, numvis, (float(numflags) / float(numvis) * 100.))
        OErr.PLog(err, OErr.Info, msg)
        OErr.printErr(err)
    outUV.Close(err)
    if err.isErr:
        OErr.printErrMsg(err, "Error closing data")