Example 1
def update_graph(input_path, plot_type):
    if input_path is None:
        return [draw_empty_figure(), "Nothing to draw yet"]

    if not os.path.exists(input_path):
        return [draw_empty_figure(), 'MS {} not found'.format(input_path)]

    vis = input_path  # "/Users/lopez/temp/CASA/casa-distro/regression/ic2233/ic2233_1.ms"
    exclude = None

    # remove trailing / for title basename
    if vis[-1] == '/':
        vis = vis[:-1]
    myms = ms()
    try:
        exclude = myms.msseltoindex(vis, baseline=exclude)['antenna1'].tolist()
    except RuntimeError as rterr:  # MSSelection failed
        errmsg = str(rterr)
        errmsg = errmsg.replace('specificion', 'specification')
        errmsg = errmsg.replace('Antenna Expression: ', '')
        raise RuntimeError("Exclude selection error: " + errmsg)

    fig = None
    if plot_type == "XY":
        fig = draw_xy(vis, exclude)
    elif plot_type == "Polar":
        fig = draw_polar(vis, exclude)
    else:
        fig = draw_empty_figure()
    return [fig, 'Drawn selected MS: {}'.format(input_path)]
Example 2
def vel_to_chan(msfile, field, obsid, spw, restfreq, vel):
    """
    Identifies the channel(s) corresponding to input LSRK velocities.
    Useful for choosing which channels to split out or flag if a line is
    expected to be present.

    Args:
        msfile (string): name of measurement set
        field (string): field name
        obsid (int): Observation ID corresponding to the selected
            spectral window
        spw (int): Spectral window number
        restfreq (float): Rest frequency [Hz]
        vel (float or array of floats): input velocity in LSRK frame
            [km/s]


    Returns:
        (array) or (int) channel number most closely corresponding to
            input LSRK velocity
    """

    tb = table()
    mstool = ms()
    # open the file
    tb.open(msfile + "/SPECTRAL_WINDOW")
    chanfreqs = tb.getcol("CHAN_FREQ", startrow=spw, nrow=1)
    tb.close()
    tb.open(msfile + "/FIELD")
    fieldnames = tb.getcol("NAME")
    tb.close()
    tb.open(msfile + "/OBSERVATION")
    obstime = np.squeeze(tb.getcol("TIME_RANGE", startrow=obsid, nrow=1))[0]
    tb.close()
    nchan = len(chanfreqs)
    mstool.open(msfile)
    lsrkfreqs = mstool.cvelfreqs(
        spwids=[spw],
        mode="channel",
        nchan=nchan,
        obstime=str(obstime) + "s",
        start=0,
        outframe="LSRK",
    )
    # convert to LSRK velocities [km/s]
    chanvelocities = (restfreq - lsrkfreqs) / restfreq * cc_kms
    mstool.close()
    if isinstance(vel, np.ndarray):
        outchans = np.zeros_like(vel)
        for i in range(len(vel)):
            outchans[i] = np.argmin(np.abs(chanvelocities - vel[i]))
        return outchans
    else:
        return np.argmin(np.abs(chanvelocities - vel))
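
A hedged usage sketch for the function above; the MS name, field, and line are placeholders, and np, cc_kms, and the CASA table/ms tools are assumed to be imported at module level as in the original code.

# Hypothetical call: channel closest to +5.2 km/s (LSRK) for a 230.538 GHz
# (CO J=2-1) rest frequency in SPW 2 of observation ID 0.
chan = vel_to_chan("target.ms", "science_field", 0, 2, 230.538e9, 5.2)

# Passing an array of velocities returns an array of channel numbers.
chans = vel_to_chan("target.ms", "science_field", 0, 2, 230.538e9,
                    np.array([-20.0, 0.0, 20.0]))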
Example 3
def read(msname='dsa110-calsrc.ms'):
    """ Read simulated ms and return data
    """

    ms = tools.ms()
    ms.open(msname)
    dd = ms.getdata(items=['data', 'axis_info', 'uvw'], ifraxis=True)
    data = dd['data']
    ms.close()
    #    times = dd['axis_info']['time_axis']['MJDseconds']
    #    plt.plot(data[...,0].flatten().real, data[...,0].flatten().imag, '.')
    logger.info("Read data of shape: {0}".format(data.shape))

    return data
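
A short usage sketch, assuming tools (casatools) and logger are set up as in the surrounding module; with ifraxis=True the returned array is typically ordered (correlation, channel, baseline, time).

data = read(msname='dsa110-calsrc.ms')
print(data.shape)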
Example 4
def identify_target(vis, fields=None, raise_missing_target=True):
    '''
    Identify the target in the MS that matches target_line_range_kms keys.
    '''

    if fields is None:
        fields = []

    from casatools import ms

    myms = ms()

    # if no fields are provided use observe_target intent
    # I once saw a calibrator that also had this intent, so check carefully
    # mymsmd.open(vis)
    myms.open(vis)

    mymsmd = myms.metadata()

    if len(fields) < 1:
        fields = mymsmd.fieldsforintent("*TARGET*", True)

    mymsmd.close()
    myms.close()

    if len(fields) < 1:
        casalog.post("ERROR: no fields given to identify.")
        return

    # Match target with the galaxy. Names should be unique enough to do this
    thisgal = None

    # Loop over the fields and find the matching galaxy name
    for field in fields:

        for gal in target_line_range_kms:
            if gal in field:
                thisgal = gal
                break

    # Check for match after looping through all fields.
    if thisgal is None:
        if raise_missing_target:
            casalog.post(
                "Unable to match fields to expected galaxy targets: {0}".
                format(fields))
            raise ValueError(
                "Unable to match fields to expected galaxy targets: {0}".
                format(fields))

    return thisgal
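
A minimal usage sketch; target_line_range_kms is the module-level dictionary of known galaxy targets assumed by the function, and the MS path is a placeholder.

thisgal = identify_target("track.ms", raise_missing_target=False)
if thisgal is not None:
    casalog.post(f"Matched science fields to {thisgal}")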
def make_flagsummary_freq_plot(myvis, flag_dict=None, save_name=None):
    '''
    This mimics the summary plots made by flagdata, but removes the interactive
    part so we can save it.
    '''

    from casatools import ms

    from casatasks import flagdata

    myms = ms()

    myms.open(myvis)

    mymsmd = myms.metadata()

    if flag_dict is None or 'spw:channel' not in flag_dict:
        flag_dict = flagdata(vis=myvis, mode='summary', spwchan=True, action='calculate')

    fig = plt.figure()
    ax = fig.add_subplot(111)

    spw_nums = mymsmd.spwsforscan(1)

    for spw in spw_nums:
        spw_freqs = mymsmd.chanfreqs(spw) / 1e9  # GHz

        spw_flagfracs = []
        for chan in range(len(spw_freqs)):
            spw_flagfracs.append(flag_dict['spw:channel'][f"{spw}:{chan}"]['flagged'] / flag_dict['spw:channel'][f'{spw}:{chan}']['total'])

        # Plot it.
        plt.plot(spw_freqs, spw_flagfracs, drawstyle='steps-mid', label=f"SPW {spw}")

    mymsmd.close()
    myms.close()

    ax.set_ylim([0.0, 1.5])

    plt.legend(loc='upper center', frameon=True, ncol=4)

    if save_name is None:
        save_name = f"{myvis}_spw_flagfraction"

    fig.savefig(f"{save_name}.png")
    fig.savefig(f"{save_name}.pdf")

    plt.close()
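
A short sketch of calling the helper above; the MS name is a placeholder, and passing a precomputed summary avoids re-running flagdata inside the function.

from casatasks import flagdata

flag_dict = flagdata(vis="track.ms", mode='summary', spwchan=True, action='calculate')
make_flagsummary_freq_plot("track.ms", flag_dict=flag_dict, save_name="track_flagfrac_freq")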
Example 6
def test_end2end(tmpdir):
    data_path = pkg_resources.resource_filename('dsamfs', 'data/')
    param_path = '{0}/test_parameters.yaml'.format(data_path)
    header_path = '{0}/test_header.txt'.format(data_path)
    print(param_path)
    run_fringestopping(param_file=param_path,
                       header_file=header_path,
                       output_dir=tmpdir)
    fname = glob.glob('{0}/*.hdf5'.format(tmpdir))[0]
    UV = UVData()
    UV.read(fname, file_type='uvh5')
    # Check that the baselines are in the correct order
    nant = UV.Nants_data
    abi = get_autobl_indices(nant, casa=False)
    ant1, ant2 = UV.baseline_to_antnums(UV.baseline_array)
    antenna_order = ant2[abi] + 1
    print(antenna_order)
    assert np.all(ant1[abi] == ant2[abi])
    print(UV.time_array[:10])
    print(type(UV.time_array))
    print(UV.time_array.dtype)
    # Check that we can convert to uvfits
    uvh5_to_ms(fname, fname.replace('.hdf5', ''))
    assert os.path.exists(fname.replace('hdf5', 'ms'))
    ms = cc.ms()
    status = ms.open(fname.replace('hdf5', 'ms'))
    assert status
    uvw_ms = ms.getdata('uvw')['uvw']
    ms.close()
    # Check that the UVW coordinates are right in the fits file
    f = pf.open(fname.replace('hdf5', 'fits'))
    uu = (f['PRIMARY'].data['UU'] * u.s * c.c).to_value(u.m)
    vv = (f['PRIMARY'].data['VV'] * u.s * c.c).to_value(u.m)
    ww = (f['PRIMARY'].data['WW'] * u.s * c.c).to_value(u.m)
    ant1_array = f['PRIMARY'].data['ANTENNA1']
    ant2_array = f['PRIMARY'].data['ANTENNA2']

    df_itrf = get_itrf()
    antenna_positions = np.array([
        df_itrf['x_m'], df_itrf['y_m'], df_itrf['z_m']
    ]).T - UV.telescope_location
    blen = np.zeros((ant1_array.shape[0], 3))
    for i, ant1 in enumerate(ant1_array):
        ant2 = ant2_array[i]
        blen[i, ...] = antenna_positions[int(ant2)-1, :] - \
                       antenna_positions[int(ant1)-1, :]

    print(ant1_array[:2], ant2_array[:2])
    assert ant1_array[1] == ant1_array[
        0]  # Check that ant1 and ant2 are defined properly
    time = Time(f['PRIMARY'].data['DATE'], format='jd').mjd
    for i in range(10):
        try:
            if f['PRIMARY'].header['CTYPE{0}'.format(i)] == 'RA':
                ra = f['PRIMARY'].header['CRVAL{0}'.format(i)] * u.deg
            elif f['PRIMARY'].header['CTYPE{0}'.format(i)] == 'DEC':
                dec = f['PRIMARY'].header['CRVAL{0}'.format(i)] * u.deg
        except KeyError:
            continue
    assert ra is not None
    assert dec is not None
    print(time.shape, blen.shape)
    uvw = calc_uvw_blt(blen, time, 'J2000', ra, dec)  # Doesn't make sense
    uvw = -1 * uvw
    print(uvw[:2])
    print(uu[:2])
    print(vv[:2])
    print(ww[:2])
    # Why have the uvw coordinates been inverted?
    assert np.all(np.abs(uvw[:, 0] - uu) < 1e-1)
    assert np.all(np.abs(uvw[:, 1] - vv) < 1e-1)
    assert np.all(np.abs(uvw[:, 2] - ww) < 1e-1)
    assert np.all(np.abs(uvw - uvw_ms.T) < 1e-2)
    UV = UVData()
    UV.read(fname.replace('hdf5', 'ms'), file_type='ms')
    assert np.all(np.abs(UV.antenna_diameters - 4.65) < 1e-4)
Example 7
def build_cont_dat(vis, target_line_range_kms,
                   line_freqs={},
                   fields=[],
                   outfile="cont.dat", overwrite=False, append=False,
                   test_print=False,
                   raise_missing_target=False):
    """
    Creates a cont.dat file for the VLA pipeline. Must be run in CASA (uses msmetadata).
    It reads SPW edges in the original observed frame (usually TOPO) and writes the
    continuum ranges tagged as TOPO. Should not matter much, as the edges should be flagged anyway.
    Example of cont.dat content from NRAO online documentation:
    https://science.nrao.edu/facilities/vla/data-processing/pipeline/#section-25
    :param vis: path to the measurement set
    :param target_line_range_kms: per-target dict of LSRK velocity ranges (km/s) to exclude from the continuum
    :param line_freqs: line rest frequencies in GHz
    :param fields: science target fields. If empty, TARGET intent fields are used.
    :param outfile: path to the output cont.dat file
    :param overwrite: if True and the outfile exists, it will be overwritten
    :param append: add to the end of an existing cont.dat file, useful for optimising lines per field
    :param test_print: print additional diagnostic information while matching lines
    :param raise_missing_target: raise an error if a field cannot be matched to a known target
    :return: None
    """

    # from taskinit import msmdtool, mstool
    from casatools import ms

    # need for metadata
    # msmd = msmdtool()
    # mymsmd = msmd()

    # TOPO -> LSRK conversion
    # ms = mstool()
    myms = ms()

    # if no fields are provided use observe_target intent
    # I once saw a calibrator that also had this intent, so check carefully
    # mymsmd.open(vis)
    myms.open(vis)

    mymsmd = myms.metadata()

    if len(fields) < 1:
        # fields = mymsmd.fieldsforintent("*OBSERVE_TARGET*", True)
        fields = mymsmd.fieldsforintent("*TARGET*", True)

    if len(fields) < 1:
        print("ERROR: no fields!")
        return

    if os.path.exists(outfile) and not overwrite and not append:
        print("ERROR: file already exists!")
        return

    # generate a dictionary containing continuum chunks for every spw of every field
    cont_dat = {}
    for field in fields:
        spws = mymsmd.spwsforfield(field)
        cont_dat_field = {}

        # Match target with the galaxy. Names should be unique enough to do this
        thisgal = None
        for gal in target_line_range_kms:
            if gal in field:
                thisgal = gal
                break
        # Check for match
        if thisgal is None:
            if raise_missing_target:
                raise ValueError("Unable to match field {} to expected galaxy targets".format(field))
            else:
                casalog.post("Unable to match field {} to expected galaxy targets. Skipping.".format(field))
                continue

        for spw in spws:
            # Get freq range of the SPW
            # chan_freqs = mymsmd.chanfreqs(spw)
            # SPW edges are reported in whichever frame was used for observing (usually TOPO)
            # TODO: implement some transformations to LSRK for the edges?

            # Grab freqs in LSRK and TOPO
            freqs_lsrk = myms.cvelfreqs(spwids=[spw], outframe='LSRK')
            freqs_topo = myms.cvelfreqs(spwids=[spw], outframe='TOPO')

            line_freqs_topo = []

            for line in line_freqs:

                restfreq = line_freqs[line] * 1e9

                # Only include if that line has a defined velocity range
                key_match = None
                for key in target_line_range_kms[thisgal]:
                    if key in line:
                        key_match = key
                        break

                if key_match is None:
                    continue

                for vel_range in target_line_range_kms[thisgal][key_match]:

                    vel_start, vel_stop = vel_range

                    freq_to_match_start = lines_rest2obs(restfreq, vel_start)
                    freq_to_match_stop = lines_rest2obs(restfreq, vel_stop)

                    if test_print:
                        print(spw, line, vel_start, vel_stop)
                        print(spw, line, freq_to_match_start, freq_to_match_stop)
                        print(freqs_lsrk.min(), freqs_lsrk.max())

                    # Not within range. Skip.
                    if freq_to_match_start > freqs_lsrk.max() or freq_to_match_stop < freqs_lsrk.min():
                        skip_line = True
                        break

                    skip_line = False

                    # Convert from Hz to GHz
                    freq_topo_start = freq_match_lsrk_to_topo(freq_to_match_start,
                                                              freqs_lsrk, freqs_topo) * 1e-9

                    freq_topo_stop = freq_match_lsrk_to_topo(freq_to_match_stop,
                                                             freqs_lsrk, freqs_topo) * 1e-9

                    if test_print:
                        print("Found range: {0}, {1}".format(freq_topo_start, freq_topo_stop))

                    line_freqs_topo.append([freq_topo_start, freq_topo_stop])

                if skip_line:
                    continue

                spw_start = np.min(freqs_topo) * 1e-9  # GHz
                spw_end = np.max(freqs_topo) * 1e-9  # GHz

                if test_print:
                    print("SPW {}: {}".format(spw, line_freqs_topo))

                cont_chunks = partition_cont_range(line_freqs_topo, spw_start, spw_end,
                                                   test_print=test_print)
                cont_dat_field.update({spw: cont_chunks})

            # print(spw, cont_chunks)
            # print(spw_start, spw_end)

        cont_dat.update({field: cont_dat_field})

    mymsmd.close()
    myms.close()

    # write the dictionary into a file usable by the CASA VLA pipeline
    access_mode = "a" if append else "w"
    with open(outfile, access_mode) as f:
        for field in cont_dat.keys():
            f.write("\nField: " + field + "\n")
            for spw in cont_dat[field].keys():
                if len(cont_dat[field][spw]) > 0:
                    f.write("\nSpectralWindow: " + str(spw) + "\n")
                    for chunk in cont_dat[field][spw]:
                        f.write(str(chunk["start"]) + "~" + str(chunk["end"]) + "GHz TOPO\n")
            f.write("\n")

    print("DONE: written in " + outfile)
Example 8
def weight_multichan(base_ms, npix, cell_size, robust=np.array([0.]), chans=np.array([2]), method='briggs', perchanweight=False, mod_pcwd=False, npixels=0):
    tb = casatools.table()
    ms = casatools.ms()

    # Use CASA table tools to get frequencies
    tb.open(base_ms+"/SPECTRAL_WINDOW")
    chan_freqs = tb.getcol("CHAN_FREQ")
    rfreq = tb.getcol("REF_FREQUENCY")
    tb.close()

    # Use CASA table tools to get columns of UVW, DATA, WEIGHT, etc.
    tb.open(base_ms, nomodify=False)
    flag   = tb.getcol("FLAG")
    sigma   = tb.getcol("SIGMA")
    uvw     = tb.getcol("UVW")
    weight  = tb.getcol("WEIGHT")
    ant1    = tb.getcol("ANTENNA1")
    ant2    = tb.getcol("ANTENNA2")
    tb.close()

    flag = np.logical_not(np.prod(flag, axis=(0,2)).T)

    # break out the u, v spatial frequencies, convert from m to lambda
    uu = uvw[0,:][:,np.newaxis]*chan_freqs[:,0]/(cc/100)
    vv = uvw[1,:][:,np.newaxis]*chan_freqs[:,0]/(cc/100)

    # toss out the autocorrelation placeholders
    xc = np.where(ant1 != ant2)[0]

    wgts = weight[0,:] + weight[1,:]

    uu_xc = uu[xc][:,flag]
    vv_xc = vv[xc][:,flag]
    wgts_xc = wgts[xc]

    dl = cell_size*arcsec
    dm = cell_size*arcsec

    du = 1./((npix)*dl)
    dv = 1./((npix)*dm)

    # create arrays to dump values
    rms = np.zeros((chans.shape[0], robust.shape[0]))
    beam_params = np.zeros((chans.shape[0],3, robust.shape[0]))

    # grid the weights outside of loop if not perchanweight, only need to do this once... 
    if perchanweight == False:
        gwgts_init = np.zeros((npix, npix))
        gwgts_init = grid_wgts(gwgts_init, np.ravel(uu_xc), np.ravel(vv_xc), du, dv, npix, np.ravel(np.broadcast_to(wgts_xc, (uu_xc.shape[1], uu_xc.shape[0])).T))

    if mod_pcwd == True:
        # TODO CHECK THIS FOR HALF PIXEL OFFSET
        uvdist_grid = np.sqrt(np.add.outer(np.arange(-(npix/2.)*du, (npix/2.)*du, du)**2, np.arange(-(npix/2.)*dv, (npix/2.)*dv, dv)**2))
        frac_bw = (np.max(chan_freqs) - np.min(chan_freqs)) / rfreq
        corr_fac = frac_bw*uvdist_grid/du
        corr_fac[corr_fac<1] = 1.

    for i, chan in enumerate(chans):
        print(chan)
        # grid the weights (with complex conjugates)
        if perchanweight == True:
            gwgts_init = np.zeros((npix, npix))
            gwgts_init = grid_wgts(gwgts_init, uu_xc[:,chan], vv_xc[:,chan], du, dv, npix, wgts_xc)  

        gwgts_init_sq = gwgts_init**2

        for j, r in enumerate(robust):
            # do the weighting, in each case for method/perchanweight selection
            if method == 'briggs':
                # calculate robust parameters
                # normalize differently if only using single channel; note that we assume the weights are not channelized and are uniform across channel
                if perchanweight == True:
                    if mod_pcwd == True:
                        f_sq = ((5*10**(-r))**2)/(np.sum(gwgts_init_sq)/(np.sum(wgts_xc)*2))
                    else:
                        f_sq = ((5*10**(-r))**2)/(np.sum(gwgts_init_sq)/(np.sum(wgts_xc)))
                else:
                    f_sq = ((5*10**(-r))**2)/(np.sum(gwgts_init_sq)/(np.sum(wgts_xc*uu_xc.shape[1])*2))

                if mod_pcwd==True:
                    gr_wgts = 1/(1+gwgts_init/corr_fac*f_sq)
                else:
                    gr_wgts = 1/(1+gwgts_init*f_sq)

                # multiply to get robust weights
                indexed_gr_wgts = ungrid_wgts(gr_wgts, uu_xc[:,chan], vv_xc[:,chan], du, dv, npix)
                wgts_robust = wgts_xc*indexed_gr_wgts
                wgts_robust_sq = wgts_xc*(indexed_gr_wgts)**2

            if method == 'briggsabs':
                # multiply to get robust weights
                S_sq = (gwgts_init[index_arr[chan,:,0], index_arr[chan,:,1]]*r**2).T
                indexed_gr_wgts = (1/(S_sq + 2*wgts_xc))
                wgts_robust = wgts_xc*indexed_gr_wgts
                wgts_robust_sq = wgts_xc*(indexed_gr_wgts)**2


            #get the total gridded weights (to make dirty beam)
            gwgts_final = np.zeros((npix, npix))
            gwgts_final = grid_wgts(gwgts_final, uu_xc[:,chan], vv_xc[:,chan], du, dv, npix, wgts_robust)           

            # create the dirty beam and calculate the beam parameters
            robust_beam = np.real(fftshift(fft2(fftshift(gwgts_final))))
            robust_beam /= np.max(robust_beam)
            #beam_params[i,:,j] = fit_beam(robust_beam, cell_size)
            beam_params[i,:,j] = fit_beam_CASA(robust_beam, cell_size)

            # calculate rms (formula from Briggs et al. 1995)
            C = 1/(2*np.sum(wgts_robust))
            rms[i,j] = 2*C*np.sqrt(np.sum(wgts_robust_sq))
            print(r, beam_params[i,:,j], rms[i,j]*1000.)
        
    return rms, beam_params
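
For context, the 'briggs' branch above implements standard Briggs (1995) robust weighting; with gridded weight density W_k and visibility weights w_i,

f^2 = \frac{(5 \cdot 10^{-R})^2}{\sum_k W_k^2 / \sum_i w_i}, \qquad w_i' = \frac{w_i}{1 + W_{k(i)} f^2},

so that R ~ +2 tends toward natural weighting and R ~ -2 toward uniform weighting. The extra factors of 2 (and of the channel count when perchanweight is False) in the code account for the conjugate points added during gridding and for summing the weights over all channels.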
Example 9
# Import required tools/tasks
from casatools import simulator, image, table, coordsys, measures, componentlist, quanta, ctsys, ms
from casatasks.private import simutil

from IPython.display import Markdown as md

# Instantiate all the required tools
sm = simulator()
ia = image()
tb = table()
cs = coordsys()
me = measures()
qa = quanta()
cl = componentlist()
mysu = simutil.simutil()
myms = ms()

import warnings
warnings.simplefilter("ignore", category=RuntimeWarning)


def plotData(msname='sim_data.ms', myplot='uv'):
    """
    Options : myplot='uv'
              myplot='data_spectrum'
              myplot='data_time'
    """
    from matplotlib.collections import LineCollection
    tb.open(msname)

    # UV coverage plot
Example 10
def plotants( vis=None, figfile=None,
              antindex=None, logpos=None,
              exclude=None, checkbaselines=None,
              title=None, showgui=None ):
        """Plot the antenna distribution in the local reference frame:
                The location of the antennas in the MS will be plotted with
                X-toward local east; Y-toward local north.  The name of each
                antenna is shown next to its respective location.

                Keyword arguments:
                vis -- Name of input visibility file.
                                default: none. example: vis='ngc5921.ms'

                figfile -- Save the plotted figure in this file.
                                default: ''. example: figfile='myFigure.png'

                antindex -- Label antennas with name and antenna ID
                                default: False. example: antindex=True

                logpos -- Produce a logarithmic position plot.
                                default: False. example: logpos=True

                exclude -- antenna IDs or names to exclude from plotting
                                default: []. example: exclude=[2,3,4], exclude='DV15'

                checkbaselines -- Only plot antennas in the MAIN table.
                                This can be useful after a split.  WARNING:  Setting
                                checkbaselines to True will add to runtime in
                                proportion to the number of rows in the dataset.
                                default: False. example: checkbaselines=True

                title -- Title written along top of plot
                                default: ''

                You can zoom in by pressing the magnifier button (bottom,
                third from right) and making a rectangular region with
                the mouse.  Press the home button (left most button) to
                remove zoom.

                A hard-copy of this plot can be obtained by pressing the
                button on the right at the bottom of the display. A file
                dialog will allow you to choose the directory, filename,
                and format of the export.
        """

        # for CASA6, check for --nogui to force showgui to be False
        # showgui is also false if --agg or --pipeline is set
        # --pipeline also sets --agg, but --agg may not be used on the
        # command line so both --pipeline and --agg need to be checked here
        if is_CASA6 and ('--nogui' in sys.argv or '--agg' in sys.argv or '--pipeline' in sys.argv):
                showgui = False

        if not showgui:
                pl.close()
                pl.ioff()
        else:
                pl.show()
                pl.ion()
        pl.clf()

        # remove trailing / for title basename
        if vis[-1]=='/':
                vis = vis[:-1]
        myms = ms( )
        try:
                exclude = myms.msseltoindex(vis, baseline=exclude)['antenna1'].tolist()
        except RuntimeError as rterr:  # MSSelection failed
                errmsg = str(rterr)
                errmsg = errmsg.replace('specificion', 'specification')
                errmsg = errmsg.replace('Antenna Expression: ', '')
                raise RuntimeError("Exclude selection error: " + errmsg)

        telescope, names, ids, xpos, ypos, stations = getPlotantsAntennaInfo(vis,
                logpos, exclude, checkbaselines)
        if not names:
                raise ValueError("No antennas selected.  Exiting plotants.")

        if not title:
                msname = os.path.basename(vis)
                title = "Antenna Positions for "
                if len(msname) > 55:
                        title += '\n'
                title += msname

        if logpos:
                plotAntennasLog(telescope, names, ids, xpos, ypos, antindex, stations)
        else:
                plotAntennas(telescope, names, ids, xpos, ypos, antindex, stations, showgui)
        pl.title(title, {'fontsize':12})
        
        if figfile:
                pl.savefig(figfile)
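
A hedged usage sketch of the task above, reusing the placeholder names from its own docstring:

plotants(vis='ngc5921.ms', figfile='myFigure.png', antindex=True,
         logpos=False, exclude=[2, 3, 4], checkbaselines=False,
         title='', showgui=False)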
Example 11
def quicklook_line_imaging(
        myvis,
        thisgal,
        linespw_dict,
        nchan_vel=5,
        # channel_width_kms=20.,
        niter=0,
        nsigma=5.,
        imsize_max=800,
        overwrite_imaging=False,
        export_fits=True):

    if not os.path.exists("quicklook_imaging"):
        os.mkdir("quicklook_imaging")

    this_vsys = target_vsys_kms[thisgal]

    # Pick our line range based on the HI for all lines.
    this_velrange = target_line_range_kms[thisgal]['HI']
    # We have a MW foreground window on some targets. Skip this for the galaxy range.
    if isinstance(this_velrange[0], list):
        for this_range in this_velrange:
            if min(this_range) < this_vsys < max(this_range):
                this_velrange = this_range
                break

    # Check that the search for the right velocity range didn't fail
    if isinstance(this_velrange[0], list):
        raise ValueError(
            f"Unable to find range with target vsys ({this_vsys}) from {this_velrange}."
            f" Check the velocity ranges defined in target_setup.py for {thisgal}"
        )

    # width_vel = channel_width_kms
    # width_vel_str = f"{width_vel}km/s"

    start_vel = f"{int(min(this_velrange))}km/s"

    # nchan_vel = int(abs(this_velrange[0] - this_velrange[1]) / width_vel)

    width_vel = int(
        round(abs(this_velrange[0] - this_velrange[1]) / float(nchan_vel)))
    width_vel_str = f"{width_vel}km/s"

    # Select only the non-continuum SPWs
    line_spws = []
    for thisspw in linespw_dict:
        if "continuum" not in linespw_dict[thisspw]['label']:
            # Our 20A-346 tracks have a combined OH1665/1667 SPW. Split into separate cubes in this case
            line_labels = linespw_dict[thisspw]['label'].split("-")

            for line_label in line_labels:
                line_spws.append([str(thisspw), line_label])

    # Select our target fields. We will loop through
    # to avoid the time + memory needed for mosaics.

    synthutil = synthesisutils()

    myms = ms()

    # if no fields are provided use observe_target intent
    # I once saw a calibrator that also had this intent, so check carefully
    # mymsmd.open(vis)
    myms.open(myvis)

    mymsmd = myms.metadata()

    target_fields = mymsmd.fieldsforintent("*TARGET*", True)

    mymsmd.close()
    myms.close()

    t0 = datetime.datetime.now()

    # Loop through targets and line SPWs
    for target_field in target_fields:

        casalog.post(f"Quick look imaging of field {target_field}")

        # Loop through the SPWs to identify the biggest image size needed.
        # For ease downstream, we will use the same imsize for all SPWs.
        # NOTE: for L-band, that's a factor of ~2 difference. It may be more pronounced in other
        # bands

        cell_size = {}
        imsizes = []

        for thisspw_info in line_spws:

            thisspw, line_name = thisspw_info

            # Ask for cellsize
            this_im = imager()
            this_im.selectvis(vis=myvis, field=target_field, spw=str(thisspw))

            image_settings = this_im.advise()
            this_im.close()

            # When all data is flagged, uvmax = 0 so cellsize = 0.
            # Check for that case to avoid tclean failures
            # if image_settings[2]['value'] == 0.:
            #     casalog.post(f"All data flagged for {this_imagename}. Skipping")
            #     continue

            # NOTE: Rounding will only be reasonable for arcsec units with our L-band setup.
            # Could easily fail on ~<0.1 arcsec cell sizes.
            cell_size[thisspw] = [
                image_settings[2]['value'], image_settings[2]['unit']
            ]

            # No point in estimating image size for an empty SPW.
            if image_settings[2]['value'] == 0.:
                continue

            # For the image size, we will use the approximate scaling
            # theta_PB = 45 / nu (arcmin)
            this_msmd = msmetadata()
            this_msmd.open(myvis)
            mean_freq = this_msmd.chanfreqs(
                int(thisspw)).mean() / 1.e9  # Hz to GHz
            this_msmd.close()

            approx_pbsize = 1.2 * (45. / mean_freq) * 60  # arcsec
            approx_imsize = synthutil.getOptimumSize(
                int(approx_pbsize / image_settings[2]['value']))
            imsizes.append(approx_imsize)

        if len(imsizes) == 0:
            casalog.post(f"{target_field} is fully flagged. Skipping.")
            continue

        this_imsize = min(imsize_max, max(imsizes))

        for thisspw_info in line_spws:

            thisspw, line_name = thisspw_info

            casalog.post(
                f"Quick look imaging of field {target_field} SPW {thisspw}")

            target_field_label = target_field.replace('-', '_')

            this_imagename = f"quicklook_imaging/quicklook-{target_field_label}-spw{thisspw}-{line_name}-{myvis}"

            if export_fits:
                check_exists = os.path.exists(f"{this_imagename}.image.fits")
            else:
                check_exists = os.path.exists(f"{this_imagename}.image")

            if check_exists:
                if overwrite_imaging:
                    rmtables(f"{this_imagename}*")
                    os.remove(f"{this_imagename}.image.fits")
                else:
                    casalog.post(f"Found {this_imagename}. Skipping imaging.")
                    continue

            if cell_size[thisspw][0] == 0:
                casalog.post(
                    f"All data flagged for {this_imagename}. Skipping")
                continue

            this_cellsize = f"{round(cell_size[thisspw][0] * 0.8, 1)}{cell_size[thisspw][1]}"

            this_pblim = 0.5

            this_nsigma = nsigma
            this_niter = niter

            # Clean up any possible imaging remnants first
            rmtables(f"{this_imagename}*")

            tclean(vis=myvis,
                   field=target_field,
                   spw=str(thisspw),
                   cell=this_cellsize,
                   imsize=this_imsize,
                   specmode='cube',
                   weighting='briggs',
                   robust=0.0,
                   start=start_vel,
                   width=width_vel_str,
                   nchan=nchan_vel,
                   niter=this_niter,
                   nsigma=this_nsigma,
                   imagename=this_imagename,
                   restfreq=f"{linerest_dict_GHz[line_name]}GHz",
                   pblimit=this_pblim)

            if export_fits:
                exportfits(imagename=f"{this_imagename}.image",
                           fitsimage=f"{this_imagename}.image.fits",
                           history=False,
                           overwrite=True)

            # Clean-up extra imaging products if they are not needed.
            cleanup_misc_quicklook(this_imagename,
                                   remove_psf=True,
                                   remove_residual=this_niter == 0,
                                   remove_image=True if export_fits else False)

    t1 = datetime.datetime.now()

    casalog.post(f"Quicklook line imaging took {t1 - t0}")
Example 12
def quicklook_continuum_imaging(myvis,
                                contspw_dict,
                                niter=0,
                                nsigma=5.,
                                imsize_max=800,
                                overwrite_imaging=False,
                                export_fits=True):
    '''
    Per-SPW MFS, nterm=1, dirty images of the targets
    '''

    if not os.path.exists("quicklook_imaging"):
        os.mkdir("quicklook_imaging")

    # Select only the continuum SPWs (in case there are any line SPWs).
    continuum_spws = []
    for thisspw in contspw_dict:
        if "continuum" in contspw_dict[thisspw]['label']:
            continuum_spws.append(str(thisspw))

    # Select our target fields. We will loop through
    # to avoid the time + memory needed for mosaics.

    synthutil = synthesisutils()

    myms = ms()

    # if no fields are provided use observe_target intent
    # I once saw a calibrator that also had this intent, so check carefully
    # mymsmd.open(vis)
    myms.open(myvis)

    mymsmd = myms.metadata()

    target_fields = mymsmd.fieldsforintent("*TARGET*", True)

    mymsmd.close()
    myms.close()

    t0 = datetime.datetime.now()

    # Loop through targets and line SPWs
    for target_field in target_fields:

        casalog.post(f"Quick look imaging of field {target_field}")

        cell_size = {}
        imsizes = []

        for thisspw in continuum_spws:

            # Ask for cellsize
            this_im = imager()
            this_im.selectvis(vis=myvis, field=target_field, spw=str(thisspw))

            image_settings = this_im.advise()
            this_im.close()

            # When all data is flagged, uvmax = 0 so cellsize = 0.
            # Check for that case to avoid tclean failures
            # if image_settings[2]['value'] == 0.:
            #     casalog.post(f"All data flagged for {this_imagename}. Skipping")
            #     continue

            # NOTE: Rounding will only be reasonable for arcsec units with our L-band setup.
            # Could easily fail on ~<0.1 arcsec cell sizes.
            cell_size[thisspw] = [
                image_settings[2]['value'], image_settings[2]['unit']
            ]

            # No point in estimating image size for an empty SPW.
            if image_settings[2]['value'] == 0.:
                continue

            # For the image size, we will use the approximate scaling
            # theta_PB = 45 / nu (arcmin)
            this_msmd = msmetadata()
            this_msmd.open(myvis)
            mean_freq = this_msmd.chanfreqs(
                int(thisspw)).mean() / 1.e9  # Hz to GHz
            this_msmd.close()

            approx_pbsize = 1.2 * (45. / mean_freq) * 60  # arcsec
            approx_imsize = synthutil.getOptimumSize(
                int(approx_pbsize / image_settings[2]['value']))
            imsizes.append(approx_imsize)

        if len(imsizes) == 0:
            casalog.post(f"{target_field} is fully flagged. Skipping.")
            continue

        this_imsize = min(imsize_max, max(imsizes))

        for thisspw in continuum_spws:

            casalog.post(
                f"Quick look imaging of field {target_field} SPW {thisspw}")

            target_field_label = target_field.replace('-', '_')

            this_imagename = f"quicklook_imaging/quicklook-{target_field_label}-spw{thisspw}-continuum-{myvis}"

            if export_fits:
                check_exists = os.path.exists(f"{this_imagename}.image.fits")
            else:
                check_exists = os.path.exists(f"{this_imagename}.image")

            if check_exists:
                if overwrite_imaging:
                    rmtables(f"{this_imagename}*")
                    os.remove(f"{this_imagename}.image.fits")
                else:
                    casalog.post(f"Found {this_imagename}. Skipping imaging.")
                    continue

            if cell_size[thisspw][0] == 0:
                casalog.post(
                    f"All data flagged for {this_imagename}. Skipping")
                continue

            this_cellsize = f"{round(cell_size[thisspw][0] * 0.8, 1)}{cell_size[thisspw][1]}"

            this_pblim = 0.5

            this_nsigma = nsigma
            this_niter = niter

            # Clean up any possible imaging remnants first
            rmtables(f"{this_imagename}*")

            tclean(vis=myvis,
                   field=target_field,
                   spw=str(thisspw),
                   cell=this_cellsize,
                   imsize=this_imsize,
                   specmode='mfs',
                   nterms=1,
                   weighting='briggs',
                   robust=0.0,
                   niter=this_niter,
                   nsigma=this_nsigma,
                   fastnoise=True,
                   imagename=this_imagename,
                   pblimit=this_pblim)

            if export_fits:
                exportfits(imagename=f"{this_imagename}.image",
                           fitsimage=f"{this_imagename}.image.fits",
                           history=False,
                           overwrite=True)

            # Clean-up extra imaging products if they are not needed.
            cleanup_misc_quicklook(this_imagename,
                                   remove_psf=True,
                                   remove_residual=this_niter == 0,
                                   remove_image=True if export_fits else False)

    t1 = datetime.datetime.now()

    casalog.post(f"Quicklook continuum imaging took {t1 - t0}")
Example 13
def weight_multichan(msfile,
                     npix,
                     cell_size,
                     robust=0.,
                     method='briggs',
                     perchanweight=False,
                     mod_pcwd=False,
                     npixels=0):
    tb = casatools.table()
    ms = casatools.ms()

    # Use CASA table tools to get frequencies
    tb.open(msfile + "/SPECTRAL_WINDOW")
    chan_freqs = tb.getcol("CHAN_FREQ")
    rfreq = tb.getcol("REF_FREQUENCY")
    tb.close()

    # Use CASA table tools to get columns of UVW, DATA, WEIGHT, etc.
    tb.open(msfile, nomodify=False)
    flag = tb.getcol("FLAG")
    sigma = tb.getcol("SIGMA")
    uvw = tb.getcol("UVW")
    weight = tb.getcol("WEIGHT_SPECTRUM")
    ant1 = tb.getcol("ANTENNA1")
    ant2 = tb.getcol("ANTENNA2")
    tb.close()

    flag = np.logical_not(np.prod(flag, axis=(0, 2)).T)

    # break out the u, v spatial frequencies, convert from m to lambda
    uu = uvw[0, :][:, np.newaxis] * chan_freqs[:, 0] / (cc / 100)
    vv = uvw[1, :][:, np.newaxis] * chan_freqs[:, 0] / (cc / 100)

    # toss out the autocorrelation placeholders
    xc = np.where(ant1 != ant2)[0]

    wgts = (weight[0, :, :] + weight[1, :, :]).T

    uu_xc = uu[xc]
    vv_xc = vv[xc]
    wgts_xc = wgts[xc]

    dl = cell_size * arcsec
    dm = cell_size * arcsec

    du = 1. / ((npix) * dl)
    dv = 1. / ((npix) * dm)

    new_wgts = np.copy(weight)

    # grid the weights outside of loop if not perchanweight, only need to do this once...
    if perchanweight == False:
        gwgts_init = np.zeros((npix, npix))
        gwgts_init = grid_wgts(gwgts_init, np.ravel(uu_xc), np.ravel(vv_xc),
                               du, dv, npix, np.ravel(wgts_xc))

    if mod_pcwd == True:
        # TODO CHECK THIS FOR HALF PIXEL OFFSET
        uvdist_grid = np.sqrt(
            np.add.outer(
                np.arange(-(npix / 2.) * du, (npix / 2.) * du, du)**2,
                np.arange(-(npix / 2.) * dv, (npix / 2.) * dv, dv)**2))
        frac_bw = (np.max(chan_freqs) - np.min(chan_freqs)) / rfreq
        corr_fac = frac_bw * uvdist_grid / du
        corr_fac[corr_fac < 1] = 1.

    for chan in range(chan_freqs.shape[0]):
        print(chan)
        # grid the weights (with complex conjugates)
        if perchanweight == True:
            gwgts_init = np.zeros((npix, npix))
            gwgts_init = grid_wgts(gwgts_init, uu_xc[:, chan], vv_xc[:, chan],
                                   du, dv, npix, wgts_xc[:, chan])

        gwgts_init_sq = gwgts_init**2

        # do the weighting, in each case for method/perchanweight selection
        if method == 'briggs':
            # calculate robust parameters
            # normalize differently if only using single channel; note that we assume the weights are not channelized and are uniform across channel
            if perchanweight == True:
                if mod_pcwd == True:
                    f_sq = (
                        (5 * 10**(-robust))**2) / (np.sum(gwgts_init_sq) /
                                                   (np.sum(wgts_xc[:, chan]) * 2))
                else:
                    f_sq = ((5 * 10**(-robust))**2) / (np.sum(gwgts_init_sq) /
                                                       (np.sum(wgts_xc[:, chan])))
            else:
                f_sq = ((5 * 10**(-robust))**2) / (np.sum(gwgts_init_sq) /
                                                   (np.sum(wgts_xc) * 2))

            if mod_pcwd == True:
                gr_wgts = 1 / (1 + gwgts_init / corr_fac * f_sq)
            else:
                gr_wgts = 1 / (1 + gwgts_init * f_sq)

            # multiply to get robust weights
            indexed_gr_wgts = ungrid_wgts(gr_wgts, uu_xc[:, chan],
                                          vv_xc[:, chan], du, dv, npix)
            new_wgts[0, chan, :] = wgts_xc[:, chan] * indexed_gr_wgts / 2.
            new_wgts[1, chan, :] = wgts_xc[:, chan] * indexed_gr_wgts / 2.

        if method == 'briggsabs':
            # multiply to get robust weights
            S_sq = (gwgts_init[index_arr[chan, :, 0], index_arr[chan, :, 1]] *
                    robust**2).T
            indexed_gr_wgts = (1 / (S_sq + 2 * wgts_xc[:, chan]))
            new_wgts[0, chan, :] = wgts_xc[:, chan] * indexed_gr_wgts / 2.
            new_wgts[1, chan, :] = wgts_xc[:, chan] * indexed_gr_wgts / 2.

    tb.open(msfile, nomodify=False)
    tb.putcol("WEIGHT_SPECTRUM", new_wgts)
    tb.close()

    return
def make_flagsummary_freq_data(myvis, output_folder='perfield_flagfraction',
                               intent="*", overwrite=False):
    '''
    This mimics the summary plots made by flagdata, but removes the interactive
    part so we can save it.
    '''

    from casatools import ms

    from casatasks import flagdata

    myms = ms()

    myms.open(myvis)

    mymsmd = myms.metadata()

    fieldsnums = mymsmd.fieldsforintent(intent)

    if len(fieldsnums) == 0:
        raise ValueError("No calibrator intents are in this MS.")

    fields = np.array(mymsmd.fieldnames())[fieldsnums]

    spw_nums = mymsmd.spwsforscan(1)

    casalog.post(f"Selecting on fields: {fields}")
    print(f"Selecting on fields: {fields}")

    for field in fields:

        casalog.post(f"Creating freq. flagging fraction for {field}")
        print(f"Creating freq. flagging fraction for {field}")

        save_name = f"{output_folder}/field_{field}_flagfrac_freq.txt"

        if os.path.exists(save_name) and overwrite:
            os.system(f"rm {save_name}")

        if not os.path.exists(save_name):

            flag_dict = flagdata(vis=myvis, mode='summary', spwchan=True, action='calculate',
                                field=field)

            flag_data = []

            for spw in spw_nums:
                spw_freqs = mymsmd.chanfreqs(spw) / 1e9  # GHz

                spw_flagfracs = []
                for chan in range(len(spw_freqs)):
                    spw_flagfracs.append(flag_dict['spw:channel'][f"{spw}:{chan}"]['flagged'] / flag_dict['spw:channel'][f'{spw}:{chan}']['total'])

                # Make an equal length SPW column
                spw_labels = [spw] * len(spw_freqs)

                flag_data.append([spw_labels, np.arange(len(spw_freqs)), spw_freqs, spw_flagfracs])

            output_data = np.hstack(flag_data).T

            np.savetxt(save_name, output_data, header="spw,channel,freq,frac")

        else:
            casalog.post(message="File {} already exists. Skipping".format(save_name),
                         origin='make_qa_tables')


    mymsmd.close()
    myms.close()
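
A short usage sketch; the output folder is assumed to already exist (the function only writes into it), and the MS name is a placeholder.

import os

os.makedirs('perfield_flagfraction', exist_ok=True)
make_flagsummary_freq_data("track.ms", output_folder='perfield_flagfraction',
                           intent="*CALIBRATE*", overwrite=False)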
def make_flagsummary_uvdist_data(myvis, nbin=25, output_folder="perfield_flagfraction",
                                 intent='*', overwrite=False):
    '''
    Make a binned flagging fraction vs. uv-distance.
    '''

    from casatools import ms

    from casatasks import flagdata

    myms = ms()

    myms.open(myvis)

    mymsmd = myms.metadata()

    # Get VLA antenna ID
    antenna_names = mymsmd.antennanames() #returns a list that corresponds to antenna ID

    # Get fields matching intent
    fieldsnums = mymsmd.fieldsforintent(intent)

    if len(fieldsnums) == 0:
        raise ValueError("No calibrator intents are in this MS.")

    fields = np.array(mymsmd.fieldnames())[fieldsnums]

    # Get SPWs
    spw_list = mymsmd.spwsforfield(fieldsnums[0])

    casalog.post(f"Selecting on fields: {fields}")
    print(f"Selecting on fields: {fields}")

    for field in fields:

        casalog.post(f"Creating uvdist flagging fraction for {field}")
        print(f"Creating uvdist flagging fraction for {field}")

        baseline_flagging_table = []

        save_name = f"{output_folder}/field_{field}_flagfrac_uvdist.txt"

        if os.path.exists(save_name) and overwrite:
            os.system(f"rm {save_name}")

        if not os.path.exists(save_name):

            for spw in spw_list:

                flag_dict = flagdata(vis=myvis, mode='summary', basecnt=True, action='calculate',
                                    field=field, spw=str(spw))

                # Make plot of flagging statistics

                # Get information for flagging percentage vs. uvdistance
                myms.selectinit()
                myms.selectchannel(1, 0, 1, 1) # look at data just for first channel - easily translates
                gantdata = myms.getdata(['antenna1','antenna2','uvdist']) # get the points I need

                # create a dictionary with flagging info
                base_dict = create_baseline_dict(antenna_names, gantdata)

                # match flagging data to dictionary entry
                datamatch = flag_match_baseline(flag_dict['baseline'], base_dict)

                # 25 is the number of uvdist bins such that there is minimal error in uvdist.
                binned_stats, barwidth = bin_statistics(datamatch, nbin)

                spw_vals = [spw] * len(binned_stats[0])
                field_vals = [field] * len(binned_stats[0])

                baseline_flagging_table.append([field_vals, spw_vals, binned_stats[0], binned_stats[1]])

            baseline_flagging_table_hstack = np.hstack(baseline_flagging_table).T

            out_table = np.zeros(baseline_flagging_table_hstack.shape[0],
                                dtype=[("field", 'U32'),
                                        ('spw', int),
                                        ('uvdist', float),
                                        ('frac', float)])

            out_table['field'] = baseline_flagging_table_hstack[:, 0].astype('U32')
            out_table['spw'] = baseline_flagging_table_hstack[:, 1].astype(int)
            out_table['uvdist'] = baseline_flagging_table_hstack[:, 2].astype(float)
            out_table['frac'] = baseline_flagging_table_hstack[:, 3].astype(float)

            np.savetxt(save_name, out_table, fmt='%s %d %f %f', header="field,spw,uvdist,frac")


    mymsmd.close()
    myms.close()
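
An analogous sketch for the uv-distance version; create_baseline_dict, flag_match_baseline, and bin_statistics are helpers assumed to be defined elsewhere in the module, and the MS name is a placeholder.

make_flagsummary_uvdist_data("track.ms", nbin=25,
                             output_folder="perfield_flagfraction",
                             intent='*CALIBRATE*', overwrite=True)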
Example 16
def create_spw_dict(myvis, min_continuum_chanwidth_kHz=50,
                    save_spwdict=False, spwdict_filename="spw_definitions.npy"):
    '''
    Create the SPW dict from MS metadata. Split based on continuum and
    use the line dictionary to match line identifications.
    '''

    from casatools import ms

    myms = ms()
    myms.open(myvis)

    metadata = myms.metadata()

    # metadata = msmdtool()

    # metadata.open(myvis)

    spw_dict = {}

    science_scans = metadata.scansforintent("*TARGET*")
    science_field0 = metadata.fieldsforscan(science_scans[0])[0]

    # Our SPW setup is the same for all fields.
    spw_ids = metadata.spwsforfield(science_field0)

    gal_vsys = None

    # Identify which target we're looking at.
    # Some of the archival data has a setup scan labeled as a target.
    # Because of this, we will loop through targets until we find one defined
    # in our target dictionary.
    for targ_scan in science_scans:

        targ_fieldname = metadata.fieldnames()[metadata.fieldsforscan(targ_scan)[0]]

        gal_vsys = None
        for gal in target_vsys_kms:

            if gal in targ_fieldname:
                gal_vsys = target_vsys_kms[gal]
                break

        if gal_vsys is not None:
            break

    if gal_vsys is None:
        raise ValueError("Cannot identify which target galaxy is observed"
                         " from field name {}".format(targ_fieldname))

    # Below is a sketch of doing this for all target fields if there are
    # multiple target galaxies.
    # np.array(metadata.fieldnames())[metadata.fieldsforintent("*TARGET*")]

    # Convert rest to observed based on the target
    lineobs_dict_GHz = {}

    for line in linerest_dict_GHz:

        lineobs_dict_GHz[line] = lines_rest2obs(linerest_dict_GHz[line], gal_vsys)

    # Counters for continuum windows in basebands A0C0, B0D0.
    cont_A_count = 0
    cont_B_count = 0

    # Populate the SPW info.
    for spwid in spw_ids:

        # Original name
        spw_name = metadata.namesforspws(spwid)[0]

        # Channel width
        chan_width = metadata.chanwidths(spwid)[0]

        # Bandwidth
        band_width = metadata.bandwidths(spwid)

        # N chans
        nchan = metadata.nchan(spwid)

        # Centre freq.
        # ctr_freq = metadata.chanfreqs
        freqs_lsrk = myms.cvelfreqs(spwids=[spwid], outframe='LSRK')
        freqs_topo = myms.cvelfreqs(spwids=[spwid], outframe='TOPO')

        # Convert from Hz to kHz
        ctr_freq = freqs_lsrk[nchan // 2 - 1] / 1e3

        freq_0_topo = freqs_topo.min()

        # Baseband
        bband = spw_name.split("#")[1]

        # Ncorr
        # ncorr = metadata.ncorrforpol(spwid)

        # Check if continuum or not. If so, assign a unique tag with
        # baseband and number.
        if chan_width >= min_continuum_chanwidth_kHz * 1e3:

            if bband.startswith("A"):
                spw_label = "continuum_A{}".format(cont_A_count)
                cont_A_count += 1
            else:
                spw_label = "continuum_B{}".format(cont_B_count)
                cont_B_count += 1

        # Otherwise do a line match
        else:

            line_match = []

            for line in lineobs_dict_GHz:

                obs_freq = lineobs_dict_GHz[line] * 1e9

                if obs_freq > freqs_lsrk.min() and obs_freq < freqs_lsrk.max():

                    line_match.append(line)

            if len(line_match) == 0:
                raise ValueError("Unable to match spectral line.")

            spw_label = "-".join(line_match)

        spw_dict[spwid] = {'label': spw_label,
                           'origname': spw_name,
                           'chanwidth': chan_width,
                           'bandwidth': band_width,
                           # 'ncorr': ncorr,
                           'centerfreq': ctr_freq,
                           'baseband': bband,
                           'freq_0_topo': freq_0_topo}

    myms.close()

    if save_spwdict:
        # Remove existing saved file
        if os.path.exists(spwdict_filename):
            os.remove(spwdict_filename)

        # Save a pickled version of the dictionary as a npy file
        np.save(spwdict_filename, spw_dict)

    return spw_dict
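
A sketch of one entry in the returned dictionary (all values are illustrative placeholders); the key is the SPW ID and 'label' is either a continuum tag or a dash-joined list of matched lines:

# spw_dict[5] might look like:
# {'label': 'HI',                      # or e.g. 'continuum_A0'
#  'origname': 'EVLA_L#A0C0#5',        # hypothetical SPW name
#  'chanwidth': 1953.125,              # Hz
#  'bandwidth': 4.0e6,                 # Hz
#  'centerfreq': 1420405.75,           # kHz (LSRK)
#  'baseband': 'A0C0',
#  'freq_0_topo': 1.418e9}             # Hz (TOPO)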
Example 17
    msmd = msmdtool()
    ia = iatool()
    tb = tbtool()
    ms = mstool()
except (ImportError, ModuleNotFoundError):
    from casatasks import tclean, split, flagdata
    from casaplotms import plotms

    from casatasks import clearcal, gaincal, rmtables, applycal, exportfits

    from casatools import msmetadata, image, table, ms
    msmd = msmetadata()
    ia = image()
    tb = table()
    ms = ms()

imaging_root = "imaging_results"
if not os.path.exists(imaging_root):
    os.mkdir(imaging_root)

# Command line options
if from_cmd:
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', nargs=1, help='Casa parameter')
    parser.add_argument('--exclude7M',
                        action='store_true',
                        help='Exclude 7M data')
    parser.add_argument('--only7M',
                        action='store_true',
Example 18
import os
import shutil
import numpy as np
from suncasa.utils import signal_utils as su
import sys

if sys.version_info.major > 2:
    from casatools import ms, quanta, msmetadata
    from casatasks import casalog

    casalog.showconsole(True)
    datams = ms()
    ms_in = ms()
    datamsmd = msmetadata()
    qa = quanta()
else:
    from taskinit import ms, qa, mstool, msmdtool, casalog

    datams = mstool()
    ms_in = mstool()
    datamsmd = msmdtool()

# from taskinit import *
# from callibrary import *
# import pdb


def subvs2(vis=None,
           outputvis=None,
           timerange='',
           spw='',
Example 19
def import_data_ms(filename):
    """Imports data from a CASA measurement set and returns visibility object"""

    tb = table()
    ms_ = ms()

    # Antenna information
    tb.open(filename)
    data = tb.getcol("DATA")
    uvw = tb.getcol("UVW")
    weight = tb.getcol("WEIGHT")
    ant1 = tb.getcol("ANTENNA1")
    ant2 = tb.getcol("ANTENNA2")
    flags = tb.getcol("FLAG")
    time = tb.getcol("TIME")
    tb.close()

    # Spectral window information
    ms_.open(filename)
    spw_info = ms_.getspectralwindowinfo()
    nchan = spw_info["0"]["NumChan"]
    npol = spw_info["0"]["NumCorr"]
    ms_.close()

    # Frequency information
    tb.open(filename+"/SPECTRAL_WINDOW")
    freqs = tb.getcol("CHAN_FREQ")
    rfreq = tb.getcol("REF_FREQUENCY")
    resolution = tb.getcol("CHAN_WIDTH")
    tb.close()

    uu = uvw[0, :]
    vv = uvw[1, :]

    # Check if pols are already averaged
    data = np.squeeze(data)
    weight = np.squeeze(weight)
    flags = np.squeeze(flags)

    if npol == 1:
        Re = data.real
        Im = data.imag
        wgts = weight

    else:
        # Polarization averaging
        Re_xx = data[0, :].real
        Re_yy = data[1, :].real
        Im_xx = data[0, :].imag
        Im_yy = data[1, :].imag
        weight_xx = weight[0, :]
        weight_yy = weight[1, :]
        flags = flags[0, :]*flags[1, :]

        # Weighted averages
        with np.errstate(divide='ignore', invalid='ignore'):
            Re = np.where((weight_xx + weight_yy) != 0, (Re_xx * weight_xx + \
                 Re_yy*weight_yy) / (weight_xx + weight_yy), 0.)
            Im = np.where((weight_xx + weight_yy) != 0, (Im_xx * weight_xx + \
                 Im_yy*weight_yy) / (weight_xx + weight_yy), 0.)
        wgts = (weight_xx + weight_yy)

    # Toss out the autocorrelations
    xc = np.where(ant1 != ant2)[0]

    # Check if there's only a single channel
    if nchan == 1:
        data_real = Re[np.newaxis, xc]
        data_imag = Im[np.newaxis, xc]
        flags = flags[xc]
    else:
        data_real = Re[:, xc]
        data_imag = Im[:, xc]
        flags = flags[:, xc]
        time = time[xc]

        # If the majority of points in any channel are flagged, it probably
        # means an entire channel is flagged - spit warning
        if np.mean(flags.all(axis=0)) > 0.5:
            print('WARNING: Over half of the (u,v) points in at least one '\
                  'channel are marked as flagged. If you did not expect this, it is '\
                  'likely due to having an entire channel flagged in the ms. Please '\
                  'double check this and be careful if model fitting or using diff mode.')

        # Collapse flags to single channel, because weights are not currently channelized
        flags = flags.any(axis=0)

    data_wgts = wgts[xc]
    data_uu = uu[xc]
    data_vv = vv[xc]

    ant1 = ant1[xc]
    ant2 = ant2[xc]

    data_VV = data_real + 1j*data_imag

    # Warning that flagged data was imported
    if np.any(flags):
        print('WARNING: Flagged data was imported. Visibility interpolation can '\
              'proceed normally, but be careful with chi^2 calculations.')

    return Visibility(data_VV.T, data_uu, data_vv, data_wgts, freqs, time, \
                      resolution, ant1, ant2, flags)
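
A minimal usage sketch, assuming the Visibility container class referenced above is
defined in the same module; the measurement set name below is purely a placeholder:

# Hypothetical call; "my_data.ms" is a placeholder path
vis = import_data_ms("my_data.ms")
# The returned object bundles the polarization-averaged visibilities, (u,v)
# coordinates, weights, frequencies, times, channel widths, antenna pairs and
# flags, in the order they are passed to the Visibility constructor above.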
Ejemplo n.º 20
0
def flag_hi_foreground(myvis,
                       calibrator_line_range_kms,
                       hi_spw_num,
                       cal_intents=["CALIBRATE*"],
                       test_print=False,
                       test_run=False):
    '''
    Define velocity regions to flag for all (or chosen) calibration
    fields based on intent.

    Parameters
    ----------
    myvis : str
        MS name.
    calibrator_line_range_kms : dict
        Dictionary with velocity range (in LSRK; radio) to flag.
    hi_spw_num : int
        The SPW of HI in the MS. If None is given, the context is used to
        identify the SPW overlapping the HI line (where we can ignore wideband
        continuum SPWs).
    cal_intents : list, optional
        List of the calibrator field intents to apply flagging to.
    test_print : bool, optional
        Print out additional information for testing purposes.
    test_run : bool, optional
        If True, only report the channel ranges that would be flagged and
        skip the actual flagdata/flagmanager calls.

    '''

    # Check context for the calibration sources given the list of intents
    # to flag.

    # Loop through calibrator sources, calling target_foreground_hi_ranges
    # Flag the requested range.

    # Make a new flagging version marking these calls at the end.

    # from taskinit import msmdtool, mstool
    from casatools import ms

    from casatasks import flagdata, flagmanager

    # msmd = msmdtool()
    # ms = mstool()

    # mymsmd = msmd()
    myms = ms()

    # Note: occasionally a calibrator field also carries the OBSERVE_TARGET
    # intent, so check the intent-based field selection below carefully.
    # mymsmd.open(myvis)
    myms.open(myvis)

    mymsmd = myms.metadata()

    # Loop through field intents. Default is all calibrators.
    field_nums = []
    for cal_intent in cal_intents:
        field_num = mymsmd.fieldsforintent(cal_intent)

        field_nums.extend(list(field_num))

    # Unique mapping
    field_nums = np.array(list(set(field_nums)))

    field_names = np.asarray(mymsmd.fieldnames())[field_nums]

    mymsmd.close()

    # Loop through the field names, identify in calibrator_line_range_kms,
    # and convert mapping from velocity -> freq (LSRK) -> channel.
    # myms.open(myvis)

    freqs_lsrk = myms.cvelfreqs(spwids=[hi_spw_num], outframe='LSRK')

    myms.close()

    # in Hz
    hi_restfreq = 1.420405752e9
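    # lines_freq2vels (defined elsewhere) presumably applies the radio velocity
    # convention: v [km/s] = c * (restfreq - freq) / restfreq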
    vels_lsrk = lines_freq2vels(freqs_lsrk, hi_restfreq)

    for field in field_names:

        if field not in calibrator_line_range_kms:
            casalog.post('Unable to locate calibrator {} in calibrator list.'.format(field))
            casalog.post('Check `calibrator_setup.py` to see if this source is missing')

            continue

        vel_start = calibrator_line_range_kms[field]['HI'][0]
        vel_stop = calibrator_line_range_kms[field]['HI'][1]

        # Order the range from redshifted to blueshifted (vel_start >= vel_stop).
        if vel_start < vel_stop:
            vel_stop, vel_start = vel_start, vel_stop

        chan_start = np.abs(vels_lsrk - vel_start).argmin()
        chan_stop = np.abs(vels_lsrk - vel_stop).argmin()

        # Ensure an ascending channel range for the spw selection string below.
        if chan_start > chan_stop:
            chan_start, chan_stop = chan_stop, chan_start

        # Do the flagging and save a new version

        if test_print:
            print('Field {0} flagging region {1}:{2}~{3}'.format(field, hi_spw_num,
                                                                 chan_start, chan_stop))
            print('Velocity: {0}, {1}'.format(vel_start, vel_stop))

        if test_run:
            continue

        flagdata(myvis, mode='manual', field=field,
                 spw='{0}:{1}~{2}'.format(hi_spw_num, chan_start, chan_stop),
                 flagbackup=False)

    if not test_run:
        flagmanager(myvis, mode='save', versionname='MW_HI_abs_flagging',
                    comment='Flag Milky Way HI absorption for calibrators.')
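
A minimal, hypothetical usage sketch; the field name, velocity range, MS name and
SPW index below are placeholders, but the dictionary layout (field name -> line
label -> [start, stop] velocity in km/s) follows the lookups made inside the function:

# Hypothetical calibrator setup; values are placeholders
calibrator_line_range_kms = {
    "J1234+5678": {"HI": [-50.0, 50.0]},    # LSRK velocities in km/s
}

flag_hi_foreground("my_data.ms",            # placeholder MS name
                   calibrator_line_range_kms,
                   hi_spw_num=3,            # placeholder SPW index
                   test_print=True,
                   test_run=True)           # dry run: report only, no flagging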
Ejemplo n.º 21
0
def run_all_uvstats(myvis,
                    out_path,
                    uv_threshold=3,
                    uv_nsigma=3,
                    try_phase_selfcal=True,
                    cleanup_calsplit=True,
                    cleanup_phaseselfcal=True,
                    remake_split=True):

    if not os.path.isdir(out_path):
        os.mkdir(out_path)

    from casatasks import split, gaincal, applycal
    from casaplotms import plotms

    # from taskinit import msmdtool, casalog

    from casatools import ms

    myms = ms()

    # msmd = msmdtool()
    # get metadata

    myms.open(myvis)
    mymsmd = myms.metadata()

    # mymsmd.open(myvis)
    cal_fields = np.unique(mymsmd.fieldsforintent('CALIBRATE*'))
    field_names = mymsmd.namesforfields(cal_fields)
    myms.close()

    # split calibrator visibilities
    field_str = ','.join([str(f) for f in cal_fields])

    output_cal_ms = out_path + '/cal_fields.ms'

    if os.path.exists(output_cal_ms) and remake_split:
        os.system('rm -r {0}'.format(output_cal_ms))

    if not os.path.exists(output_cal_ms):
        split(vis=myvis,
              field=field_str,
              keepflags=True,
              timebin='0s',
              outputvis=output_cal_ms)

    # These are the flux calibrators, which have built-in models in CASA.
    skip_fields = ['3C286', '3C48', '3C147', '3C138']

    gaincal_tables = []

    for field_name in field_names:

        if np.any([field_name in skip1 for skip1 in skip_fields]):
            continue

        plotms_outfile = out_path + '/plotms_amp_uvwave_field_{0}.txt'.format(
            field_name)
        casalog.post(
            message='Exporting from plotms: {0}'.format(plotms_outfile),
            origin='run_all_uvstats')

        if not os.path.exists(plotms_outfile):
            plotms(vis=out_path + '/cal_fields.ms',
                   field=field_name,
                   xaxis='UVwave',
                   yaxis='Amp',
                   ydatacolumn='data',
                   averagedata=True,
                   scalar=False,
                   avgchannel='4096',
                   avgtime='1000',
                   avgscan=False,
                   correlation='RR,LL',
                   plotfile=plotms_outfile,
                   showgui=False,
                   overwrite=True)
        else:
            casalog.post(message='File {0} already exists. Skipping'.format(
                plotms_outfile),
                         origin='run_all_uvstats')

        infile = out_path + '/plotms_amp_uvwave_field_{0}.txt'.format(
            field_name)
        casalog.post(message='Analyzing UV stats for {0}'.format(infile),
                     origin='run_all_uvstats')

        # Read in txt from plotms
        dat, median_flux = get_uvdata(infile)
        # n_scans = len(np.unique(dat['scan']))
        binned_dat = bin_uvdata(dat)
        binned_dat_perscan = bin_uvdata_perscan(dat)
        if binned_dat.shape[1] == 0:
            continue
        if binned_dat_perscan.shape[1] == 0:
            continue
        plot_uvdata_perscan(binned_dat_perscan,
                            binned_dat,
                            infile,
                            bin_type='combined')

        # try phase-only selfcal
        if try_phase_selfcal:

            gaincal_table = out_path + '/cal_field_{0}.g'.format(field_name)

            try:
                gaincal(vis=output_cal_ms,
                        caltable=gaincal_table,
                        field=field_name,
                        solint='int',
                        refant='',
                        calmode='p')
                applycal(vis=output_cal_ms,
                         gaintable=gaincal_table,
                         field=field_name,
                         calwt=False)

                gaincal_tables.append(gaincal_table)

                plotms_outfile = out_path + '/plotms_amp_uvwave_cal_field_{0}.txt'.format(
                    field_name)
                casalog.post(message='Exporting from plotms: {0}'.format(
                    plotms_outfile),
                             origin='run_all_uvstats')

                if not os.path.exists(plotms_outfile):

                    plotms(vis=output_cal_ms,
                           field=field_name,
                           xaxis='UVwave',
                           yaxis='Amp',
                           ydatacolumn='corrected',
                           averagedata=True,
                           scalar=False,
                           avgchannel='4096',
                           avgtime='1000',
                           avgscan=False,
                           correlation='RR,LL',
                           plotfile=plotms_outfile,
                           showgui=False,
                           overwrite=True)

                else:
                    casalog.post(
                        message='File {0} already exists. Skipping'.format(
                            plotms_outfile),
                        origin='run_all_uvstats')

                infile = out_path + '/plotms_amp_uvwave_cal_field_{0}.txt'.format(
                    field_name)
                casalog.post(
                    message='Analyzing UV stats for {0}'.format(infile),
                    origin='run_all_uvstats')

                dat, median_flux = get_uvdata(infile)
                # n_scans = len(np.unique(dat['scan']))
                binned_dat = bin_uvdata(dat)
                binned_dat_perscan = bin_uvdata_perscan(dat)
                if binned_dat.shape[1] == 0:
                    continue
                if binned_dat_perscan.shape[1] == 0:
                    continue
                plot_uvdata_perscan(binned_dat_perscan,
                                    binned_dat,
                                    infile,
                                    bin_type='combined')

            except Exception as exc:
                casalog.post(
                    message='Problem calibrating field {0}: {1}'.format(field_name, exc),
                    origin='run_all_uvstats')

    # Delete calibrator split
    if cleanup_calsplit:
        os.system("rm -r {}".format(output_cal_ms))
        os.system("rm -r {}.flagversions".format(output_cal_ms))

    # Delete gaincal tables
    if cleanup_phaseselfcal:
        for gaincal_table in gaincal_tables:
            os.system("rm -r {}".format(gaincal_table))
Ejemplo n.º 22
0
# test_CAS_6733 checks for an infinite loop bug
# test_avgInterval checks for the existence of the average interval information
# test_listunfl checks that unflagged information is displayed by listobs
#
###########################################################################
CASA6 = False
import sys
import os
import unittest
import hashlib
import subprocess
try:
    import casatools
    from casatasks import partition, split, listobs, casalog
    from casatools.platform import bytes2str
    ms = casatools.ms()
    CASA6 = True
except ImportError:
    from __main__ import default
    from tasks import *
    from taskinit import *

# If the test is being run in CASA6 use the new method to get the CASA path
if CASA6:
    datapath = casatools.ctsys.resolve('/data/regression/unittest/listobs')

else:
    dataroot = os.environ.get('CASAPATH').split()[0] + '/data/regression/'
    datapath = dataroot + 'unittest/listobs/'

    # Generate the test data