Example #1
def get_normalized_side_channel(im, pxwidth, a=None, maxSide=None):
    if a is None:
        # estimate the rotation angle (if any)
        eim = cr.Scharr_edge(im)
        nanmask = np.isnan(eim)
        eim[nanmask] = 0
        eim = ir.rotate_scale(eim, np.pi / 4, 1)
        a = ir.orientation_angle(eim > np.percentile(eim, 95)) + np.pi / 4
        # rotate
        im2 = ir.rotate_scale(im, -a, 1, borderValue=np.nan)

        # determine which side of the image the channel is on
        if maxSide is None:
            half = int(np.mean(im.shape) // 4)
            maxSide = [
                im2[:half, :], im2[:, -half:], im2[-half:, :], im2[:, :half]
            ]
            maxSide = np.asarray([np.nansum(i) for i in maxSide])
            maxSide = maxSide.argmax()
        a += np.pi / 2 * maxSide
    im = ir.rotate_scale(im, -a, 1, borderValue=np.nan)

    # find the channel position
    prof = np.diff(np.nanmedian(im, 1))
    nvalid = np.sum(np.isfinite(np.diff(im, axis=0)), 1)
    valid = (np.isfinite(prof) & (nvalid >= np.median(nvalid) * 4 / 5))
    prof[~valid] = np.nan
    prof[valid] = scipy.signal.savgol_filter(prof[valid], 21, 3)
    top_idx = np.nanargmin(prof[5:-5]) + 5

    border = int(np.ceil(np.abs(np.tan(ir.clamp_angle(a)) * im.shape[0])))
    xSlice = gfilter(np.nanmean(im[top_idx:, :], 0), 1)
    if border > 0:  # guard: a [0:-0] slice would be empty
        xSlice = xSlice[border:-border]
    xSlice = xSlice - np.nanmedian(xSlice)

    cpos = center(xSlice, pxwidth) + border

    left_idx = cpos - pxwidth / 2
    right_idx = cpos + pxwidth / 2

    return (a, int(np.round(left_idx)), int(np.round(right_idx)),
            int(np.round(top_idx)))
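A detail worth noting in the profile step above: the Savitzky-Golay filter is applied to the compacted array of finite samples only, so the NaN gaps are closed up before smoothing. A self-contained illustration of that pattern (synthetic profile; all names here are illustrative):

import numpy as np
import scipy.signal

prof = np.sin(np.linspace(0, 3, 200)) + 0.1 * np.random.randn(200)
prof[::17] = np.nan                  # sprinkle in some invalid samples

valid = np.isfinite(prof)
# window length 21, polynomial order 3, applied to the finite samples only;
# the gaps are treated as contiguous, exactly as in the function above
prof[valid] = scipy.signal.savgol_filter(prof[valid], 21, 3)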
Example #2
def main(ms_input,
         input_colname,
         output_data_colname,
         output_weights_colname,
         baseline_file,
         delta_theta_deg,
         target_peak_reduction_factor=0.99):
    """
    Pre-average data using a sliding Gaussian kernel in frequency

    Parameters
    ----------
    ms_input : str
        MS filename
    input_colname : str
        Name of the column in the MS from which the data are read
    output_data_colname : str
        Name of the column in the MS into which the averaged data are written
    output_weights_colname : str
        Name of the column in the MS into which the averaged data weights are
        written
    baseline_file : str
        Filename of pickled baseline lengths
    delta_theta_deg : float
        Radius of calibration region in degrees
    target_peak_reduction_factor : float, optional
        Target reduction in peak flux density. Note: this reduction is in
        addition to any incurred by earlier averaging

    """
    if os.path.exists(baseline_file):
        with open(baseline_file, 'rb') as f:  # pickle files must be read in binary mode
            baseline_dict = pickle.load(f)
    else:
        print('Cannot find baseline_file. Exiting...')
        sys.exit(1)
    delta_theta_deg = float(delta_theta_deg)
    target_peak_reduction_factor = float(target_peak_reduction_factor)

    ms = pt.table(ms_input, readonly=False, ack=False)
    ant1_list = ms.getcol('ANTENNA1')
    ant2_list = ms.getcol('ANTENNA2')
    data_all = ms.getcol(input_colname)
    weights_all = ms.getcol('WEIGHT_SPECTRUM')
    flags = ms.getcol('FLAG')

    # Get lowest frequency of MS and channel width
    sw = pt.table(ms_input + '::SPECTRAL_WINDOW', ack=False)
    freq_hz = sw.col('CHAN_FREQ')[0][0]
    chan_width_hz = sw.col('CHAN_WIDTH')[0][0]

    flags[np.isnan(data_all)] = True  # flag NaNs
    weights_all = weights_all * ~flags  # set weight of flagged data to 0

    # Check that all NaNs are flagged
    if np.count_nonzero(np.isnan(data_all[~flags])) > 0:
        logging.error('NaNs in unflagged data in {0}!'.format(ms_input))
        sys.exit(1)

    # Weight data and set bad data to 0 so nans do not propagate
    data_all = np.nan_to_num(data_all * weights_all)

    # Iteration on baseline combination
    for ant in itertools.product(set(ant1_list), set(ant2_list)):
        if ant[0] >= ant[1]:
            continue
        sel1 = np.where(ant1_list == ant[0])[0]
        sel2 = np.where(ant2_list == ant[1])[0]
        sel_list = sorted(list(frozenset(sel1).intersection(sel2)))

        data = data_all[sel_list, :, :]
        weights = weights_all[sel_list, :, :]

        # compute the Gaussian sigma from the max bandwidth over which we
        # can average and avoid significant bandwidth smearing but limited to
        # no more than 3 MHz (to avoid smoothing over the beam-induced effects)
        lambda_km = 299792.458 / freq_hz
        dist_km = baseline_dict['{0}-{1}'.format(ant[0], ant[1])]
        resolution_deg = lambda_km / dist_km * 180.0 / np.pi
        stddev_hz = min(
            3e6,
            get_target_bandwidth(freq_hz, delta_theta_deg, resolution_deg,
                                 target_peak_reduction_factor) / 4.0)
        stddev_nchan = stddev_hz / chan_width_hz * np.sqrt(0.5 / dist_km)

        # smear weighted data and weights
        dataR = gfilter(np.real(data), stddev_nchan, axis=1)
        dataI = gfilter(np.imag(data), stddev_nchan, axis=1)
        weights = gfilter(weights, stddev_nchan, axis=1)

        # re-create data
        data = (dataR + 1j * dataI)
        data[(weights != 0)] /= weights[(weights != 0)]  # avoid divbyzero
        data_all[sel_list, :, :] = data
        weights_all[sel_list, :, :] = weights

    # Add the output columns if needed
    if output_data_colname not in ms.colnames():
        desc = ms.getcoldesc(input_colname)
        desc['name'] = output_data_colname
        ms.addcols(desc)
    if output_weights_colname not in ms.colnames():
        desc = ms.getcoldesc('WEIGHT_SPECTRUM')
        desc['name'] = output_weights_colname
        ms.addcols(desc)

    ms.putcol(output_data_colname, data_all)
    ms.putcol('FLAG', flags)  # this saves flags of nans, which is always good
    ms.putcol(output_weights_colname, weights_all)
    ms.close()
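All of the MS-averaging examples here rely on the same trick, spelled out in their comments: multiply the data by the weights, Gaussian-convolve both, then divide, so that flagged samples (weight 0) contribute nothing. A self-contained sketch of that running weighted average, assuming `gfilter` throughout these examples is `scipy.ndimage.gaussian_filter1d` imported under an alias (function and variable names below are illustrative):

import numpy as np
from scipy.ndimage import gaussian_filter1d

def weighted_gaussian_average(data, weights, sigma, axis=0):
    # zero out bad samples so NaNs cannot propagate through the convolution
    wdata = np.nan_to_num(data * weights)
    # smooth real and imaginary parts separately, as the examples do
    num = (gaussian_filter1d(wdata.real, sigma, axis=axis)
           + 1j * gaussian_filter1d(wdata.imag, sigma, axis=axis))
    den = gaussian_filter1d(weights, sigma, axis=axis)
    out = np.zeros_like(num)
    np.divide(num, den, out=out, where=den != 0)  # avoid divide-by-zero
    return out, den

The smoothed weights are returned as well, since the examples write them back as the new WEIGHT_SPECTRUM.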
Example #3
    if stddev < 0.5:
        continue  # avoid very small smoothing

    flags[np.isnan(data)] = True  # flag NaNs
    weights[flags] = 0  # set weight of flagged data to 0
    del flags

    # Multiply every element of the data by the weights, convolve both the
    # scaled data and the weights, and then divide the convolved data by the
    # convolved weights (translating flagged data into weight=0). That's
    # basically the equivalent of a running weighted average with a Gaussian
    # window function.

    # set bad data to 0 so NaNs do not propagate
    data = np.nan_to_num(data * weights)

    # smear weighted data and weights
    if options.onlyamp:
        dataAMP = gfilter(np.abs(data), stddev, axis=0)
        dataPH = np.angle(data)
    else:
        dataR = gfilter(np.real(data), stddev, axis=0)  #, truncate=4.)
        dataI = gfilter(np.imag(data), stddev, axis=0)  #, truncate=4.)

    weights = gfilter(weights, stddev, axis=0)  #, truncate=4.)

    # re-create data
    if options.onlyamp:
        data = dataAMP * (np.cos(dataPH) + 1j * np.sin(dataPH))
    else:
        data = (dataR + 1j * dataI)
    data[(weights != 0)] /= weights[(weights != 0)]  # avoid divbyzero

    # print(np.count_nonzero(data[~flags]), np.count_nonzero(data[flags]),
    #       100 * np.count_nonzero(data[flags]) / np.count_nonzero(data))
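In the `onlyamp` branch only the amplitudes are smoothed and the phases pass through unchanged; the reconstruction `dataAMP * (np.cos(dataPH) + 1j * np.sin(dataPH))` is simply the polar form, equivalent to `dataAMP * np.exp(1j * dataPH)`. A compact sketch (synthetic data; assumes `gfilter` is SciPy's Gaussian filter under an alias):

import numpy as np
from scipy.ndimage import gaussian_filter1d as gfilter

# unit-amplitude visibilities with noisy amplitudes (synthetic)
vis = (1 + 0.1 * np.random.randn(64)) * np.exp(1j * np.linspace(0, np.pi, 64))
amp = gfilter(np.abs(vis), 3.0, axis=0)  # smooth amplitudes only
ph = np.angle(vis)                       # phases are left untouched
vis_smooth = amp * np.exp(1j * ph)       # polar re-assembly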
Example #4
        sel = (ant1 == ant[0]) & (ant2 == ant[1])

        #Multiply every element of the data by the weights, convolve both the scaled data and the weights, and then
        #divide the convolved data by the convolved weights (translating flagged data into weight=0). That's basically the equivalent of a
        #running weighted average with a Gaussian window function.

        # get cycle values
        weights = all_weights[sel]
        data = all_data[sel]

        # set bad data to 0 so nans do not propagate
        data = np.nan_to_num(data * weights)

        # smear weighted data and weights
        if options.onlyamp:
            dataAMP = gfilter(np.abs(data), stddevs[i], axis=0)
            dataPH = np.angle(data)
        else:
            dataR = gfilter(np.real(data), stddevs[i], axis=0)  #, truncate=4.)
            dataI = gfilter(np.imag(data), stddevs[i], axis=0)  #, truncate=4.)

        weights = gfilter(weights, stddevs[i], axis=0)  #, truncate=4.)

        # re-create data
        if options.onlyamp:
            data = dataAMP * (np.cos(dataPH) + 1j * np.sin(dataPH))
        else:
            data = (dataR + 1j * dataI)
        data[(weights != 0)] /= weights[(weights != 0)]  # avoid divbyzero
        all_data[sel] = data
        all_weights[sel] = weights
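Examples #2 and #5 select the rows of one baseline via `frozenset(sel1).intersection(sel2)`; the boolean mask used in this example is the more idiomatic NumPy equivalent. A small illustration (synthetic antenna columns, purely for demonstration):

import numpy as np

ant1 = np.array([0, 0, 1, 0, 2])
ant2 = np.array([1, 2, 2, 1, 3])

# boolean-mask selection (this example's style)
mask = (ant1 == 0) & (ant2 == 1)

# index-intersection selection (style of Examples #2 and #5)
sel1 = np.where(ant1 == 0)[0]
sel2 = np.where(ant2 == 1)[0]
idx = sorted(frozenset(sel1).intersection(sel2))

assert list(np.flatnonzero(mask)) == idx  # rows [0, 3] either way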
Example #5
def BLavg_multi(sorted_ms_dict,
                baseline_dict,
                input_colname,
                output_colname,
                ionfactor,
                clobber=True,
                maxgap_sec=1800,
                check_files=True):
    """
    Averages data using a sliding Gaussian kernel on the weights
    """

    #### sort msnames into groups with gaps < maxgap_sec
    nfiles = len(sorted_ms_dict['msnames'])
    ms_groups = []
    newgroup = []
    for msindex in range(nfiles):
        if (msindex + 1 == nfiles
                or sorted_ms_dict['starttimes'][msindex + 1]
                > sorted_ms_dict['endtimes'][msindex] + maxgap_sec):
            newgroup.append(sorted_ms_dict['msnames'][msindex])
            ms_groups.append(newgroup)
            newgroup = []
        else:
            newgroup.append(sorted_ms_dict['msnames'][msindex])

    print "BLavg_multi: Working on", len(
        ms_groups), "groups of measurement sets."
    #### loop over all groups
    msindex = 0
    for ms_names in ms_groups:
        ### collect data from all files in this group
        freqtab = pt.table(ms_names[0] + '::SPECTRAL_WINDOW', ack=False)
        freq = freqtab.getcell('REF_FREQUENCY', 0)
        freqtab.close()
        timepersample = None
        ant1_list = []
        ant2_list = []
        all_time_list = []
        all_data_list = []
        all_weights_list = []
        all_flags_list = []
        for msfile in ms_names:
            if not os.path.exists(msfile):
                print("Cannot find MS file: {0}.".format(msfile))
                sys.exit(1)
            # open input/output MS
            ms = pt.table(msfile, readonly=True, ack=False)
            if check_files:
                freqtab = pt.table(msfile + '::SPECTRAL_WINDOW', ack=False)
                if freqtab.getcell('REF_FREQUENCY', 0) != freq:
                    print("Different REF_FREQUENCYs: {0} and: {1} in {2}.".
                          format(freq, freqtab.getcell('REF_FREQUENCY', 0),
                                 msfile))
                    sys.exit(1)
                freqtab.close()
            #wav = 299792458. / freq
            if timepersample is None:
                timepersample = ms.getcell('INTERVAL', 0)
            elif check_files:
                if timepersample != ms.getcell('INTERVAL', 0):
                    print("Different INTERVALs: {0} and: {1} in {2}.".format(
                        timepersample, ms.getcell('INTERVAL', 0), msfile))
                    sys.exit(1)
            all_time_list.append(ms.getcol('TIME_CENTROID'))
            ant1_list.append(ms.getcol('ANTENNA1'))
            ant2_list.append(ms.getcol('ANTENNA2'))
            all_data_list.append(ms.getcol(input_colname))
            all_weights_list.append(ms.getcol('WEIGHT_SPECTRUM'))
            all_flags_list.append(ms.getcol('FLAG'))

            all_flags_list[-1][np.isnan(all_data_list[-1])] = True  # flag NaNs
            all_weights_list[-1] = all_weights_list[-1] * ~all_flags_list[
                -1]  # set weight of flagged data to 0

            # Check that all NaNs are flagged
            if np.count_nonzero(
                    np.isnan(all_data_list[-1][~all_flags_list[-1]])) > 0:
                logging.error('NaNs in unflagged data in {0}!'.format(msfile))
                sys.exit(1)

        ### iteration on baseline combination
        for ant in itertools.product(set(ant1_list[0]), set(ant2_list[0])):
            if ant[0] >= ant[1]:
                continue
            sel_list = []
            weights_list = []
            data_list = []
            # select data from all MSs
            for msindex in range(len(ms_names)):
                sel1 = np.where(ant1_list[msindex] == ant[0])[0]
                sel2 = np.where(ant2_list[msindex] == ant[1])[0]
                sel_list.append(
                    sorted(list(frozenset(sel1).intersection(sel2))))

                # # get weights and data
                # weights_list.append( all_weights[sel_list[msindex],:,:] )
                # data_list.append( all_data[sel_list[msindex],:,:] )
            # combine data and weights into one array
            data = all_data_list[0][sel_list[0], :, :]
            weights = all_weights_list[0][sel_list[0], :, :]
            fillshape = list(data.shape)
            startidx = [0]
            endidx = [data.shape[0]]
            for msindex in range(1, len(ms_names)):
                #pad gap between obs
                filltimes = np.arange(np.max(all_time_list[msindex - 1]),
                                      np.min(all_time_list[msindex]),
                                      timepersample)
                fillshape[0] = len(filltimes)
                data = np.concatenate((data, np.zeros(fillshape)), axis=0)
                weights = np.concatenate((weights, np.zeros(fillshape)),
                                         axis=0)
                startidx.append(data.shape[0])
                data = np.concatenate(
                    (data, all_data_list[msindex][sel_list[msindex], :, :]),
                    axis=0)
                weights = np.concatenate(
                    (weights,
                     all_weights_list[msindex][sel_list[msindex], :, :]),
                    axis=0)
                endidx.append(data.shape[0])

            # compute the Gaussian kernel width (sigma)
            dist = baseline_dict['{0}-{1}'.format(ant[0], ant[1])]
            stddev = 30.0 * ionfactor * np.sqrt(
                (25.0 / dist)) * (freq / 60.e6)  # in sec
            stddev = stddev / timepersample  # in samples

            #    Multiply every element of the data by the weights, convolve both
            #    the scaled data and the weights, and then divide the convolved data
            #    by the convolved weights (translating flagged data into weight=0).
            #    That's basically the equivalent of a running weighted average with
            #    a Gaussian window function.

            # weight data and set bad data to 0 so NaNs do not propagate
            data = np.nan_to_num(data * weights)

            # smear weighted data and weights
            dataR = gfilter(np.real(data), stddev, axis=0)  #, truncate=4.)
            dataI = gfilter(np.imag(data), stddev, axis=0)  #, truncate=4.)
            weights = gfilter(weights, stddev, axis=0)  #, truncate=4.)

            # re-create data
            data = (dataR + 1j * dataI)
            data[(weights != 0)] /= weights[(weights != 0)]  # avoid divbyzero
            for msindex in range(len(ms_names)):
                all_data_list[msindex][sel_list[msindex], :, :] = data[
                    startidx[msindex]:endidx[msindex], :, :]
                all_weights_list[msindex][sel_list[msindex], :, :] = weights[
                    startidx[msindex]:endidx[msindex], :, :]

        ### write the data back to the files
        for msindex in range(len(ms_names)):
            ms = pt.table(ms_names[msindex], readonly=False, ack=False)
            # Add the output columns if needed
            if output_colname not in ms.colnames():
                desc = ms.getcoldesc(input_colname)
                desc['name'] = output_colname
                ms.addcols(desc)

            ms.putcol(output_colname, all_data_list[msindex])
            ms.putcol('FLAG', all_flags_list[msindex]
                      )  # this saves flags of nans, which is always good
            ms.putcol('WEIGHT_SPECTRUM', all_weights_list[msindex])
            ms.close()
        print "BLavg_multi: Finished one group of measurement sets."
Example #6
        weights[flags] = 0  # set weight of flagged data to 0
        del flags

        # Multiply every element of the data by the weights, convolve both the scaled data and the weights, and then
        # divide the convolved data by the convolved weights (translating flagged data into weight=0). That's basically the equivalent of a
        # running weighted average with a Gaussian window function.

        # set bad data to 0 so nans do not propagate
        data = np.nan_to_num(data * weights)

        # smear weighted data and weights
        if options.onlyamp:
            dataAMP = np.abs(data)
            dataPH = np.angle(data)
            if not options.notime:
                dataAMP = gfilter(dataAMP, stddev_t, axis=0)
            if not options.nofreq:
                dataAMP = gfilter(dataAMP, stddev_f, axis=1)
        else:
            dataR = np.real(data)
            dataI = np.imag(data)
            if not options.notime:
                dataR = gfilter(dataR, stddev_t, axis=0)  #, truncate=4.)
                dataI = gfilter(dataI, stddev_t, axis=0)  #, truncate=4.)
            if not options.nofreq:
                dataR = gfilter(dataR, stddev_f, axis=1)  #, truncate=4.)
                dataI = gfilter(dataI, stddev_f, axis=1)  #, truncate=4.)

        if not options.notime:
            weights = gfilter(weights, stddev_t, axis=0)  #, truncate=4.)
        if not options.nofreq:
            weights = gfilter(weights, stddev_f, axis=1)  #, truncate=4.)
Example #7
def main(ms_input, input_colname, output_data_colname, output_weights_colname,
    baseline_file, delta_theta_deg, target_peak_reduction_factor=0.99):
    """
    Pre-average data using a sliding Gaussian kernel in frequency

    Parameters
    ----------
    ms_input : str
        MS filename
    input_colname : str
        Name of the column in the MS from which the data are read
    output_data_colname : str
        Name of the column in the MS into which the averaged data are written
    output_weights_colname : str
        Name of the column in the MS into which the averaged data weights are
        written
    baseline_file : str
        Filename of pickled baseline lengths
    delta_theta_deg : float
        Radius of calibration region in degrees
    target_peak_reduction_factor : float, optional
        Target reduction in peak flux density. Note: this reduction is in
        addition to any incurred by earlier averaging

    """
    if os.path.exists(baseline_file):
        with open(baseline_file, 'rb') as f:  # pickle files must be read in binary mode
            baseline_dict = pickle.load(f)
    else:
        print('Cannot find baseline_file. Exiting...')
        sys.exit(1)
    delta_theta_deg = float(delta_theta_deg)
    target_peak_reduction_factor = float(target_peak_reduction_factor)

    ms = pt.table(ms_input, readonly=False, ack=False)
    ant1_list = ms.getcol('ANTENNA1')
    ant2_list = ms.getcol('ANTENNA2')
    data_all = ms.getcol(input_colname)
    weights_all = ms.getcol('WEIGHT_SPECTRUM')
    flags = ms.getcol('FLAG')

    # Get lowest frequency of MS and channel width
    sw = pt.table(ms_input+'::SPECTRAL_WINDOW', ack=False)
    freq_hz = sw.col('CHAN_FREQ')[0][0]
    chan_width_hz = sw.col('CHAN_WIDTH')[0][0]

    flags[ np.isnan(data_all) ] = True # flag NaNs
    weights_all = weights_all * ~flags # set weight of flagged data to 0

    # Check that all NaNs are flagged
    if np.count_nonzero(np.isnan(data_all[~flags])) > 0:
        logging.error('NaNs in unflagged data in {0}!'.format(ms_input))
        sys.exit(1)

    # Weight data and set bad data to 0 so nans do not propagate
    data_all = np.nan_to_num(data_all*weights_all)

    # Iteration on baseline combination
    for ant in itertools.product(set(ant1_list), set(ant2_list)):
        if ant[0] >= ant[1]:
            continue
        sel1 = np.where(ant1_list == ant[0])[0]
        sel2 = np.where(ant2_list == ant[1])[0]
        sel_list = sorted(list(frozenset(sel1).intersection(sel2)))

        data = data_all[sel_list,:,:]
        weights = weights_all[sel_list,:,:]

        # compute the Gaussian sigma from the max bandwidth over which we
        # can average and avoid significant bandwidth smearing but limited to
        # no more than 3 MHz (to avoid smoothing over the beam-induced effects)
        lambda_km = 299792.458 / freq_hz
        dist_km = baseline_dict['{0}-{1}'.format(ant[0], ant[1])]
        if dist_km > 0:
            resolution_deg = lambda_km / dist_km * 180.0 / np.pi
            stddev_hz = min(3e6, get_target_bandwidth(freq_hz, delta_theta_deg,
                resolution_deg, target_peak_reduction_factor)/4.0)
            stddev_nchan = stddev_hz / chan_width_hz * np.sqrt(0.5 / dist_km)

            # smear weighted data and weights
            dataR = gfilter(np.real(data), stddev_nchan, axis=1)
            dataI = gfilter(np.imag(data), stddev_nchan, axis=1)
            weights = gfilter(weights, stddev_nchan, axis=1)

            # re-create data
            data = (dataR + 1j * dataI)
            data[(weights != 0)] /= weights[(weights != 0)] # avoid divbyzero
            data_all[sel_list,:,:] = data
            weights_all[sel_list,:,:] = weights

    # Add the output columns if needed
    if output_data_colname not in ms.colnames():
        desc = ms.getcoldesc(input_colname)
        desc['name'] = output_data_colname
        ms.addcols(desc)
    if output_weights_colname not in ms.colnames():
        desc = ms.getcoldesc('WEIGHT_SPECTRUM')
        desc['name'] = output_weights_colname
        ms.addcols(desc)

    ms.putcol(output_data_colname, data_all)
    ms.putcol('FLAG', flags) # this saves flags of nans, which is always good
    ms.putcol(output_weights_colname, weights_all)
    ms.close()
Example #8
def BLavg_multi(sorted_ms_dict, baseline_dict, input_colname, output_colname, ionfactor,
          clobber=True, maxgap_sec=1800, check_files = True):
    """
    Averages data using a sliding Gaussian kernel on the weights
    """

    #### sort msnames into groups with gaps < maxgap_sec
    nfiles = len(sorted_ms_dict['msnames'])
    ms_groups = []
    newgroup = []
    for msindex in range(nfiles):
        if msindex+1 == nfiles or sorted_ms_dict['starttimes'][msindex+1] > sorted_ms_dict['endtimes'][msindex] + maxgap_sec:
            newgroup.append(sorted_ms_dict['msnames'][msindex])
            ms_groups.append(newgroup)
            newgroup = []
        else:
            newgroup.append(sorted_ms_dict['msnames'][msindex])

    print "BLavg_multi: Working on",len(ms_groups),"groups of measurement sets."
    #### loop over all groups
    msindex = 0
    for ms_names in ms_groups:
        ### collect data from all files in this group
        freqtab = pt.table(ms_names[0] + '::SPECTRAL_WINDOW', ack=False)
        freq = freqtab.getcell('REF_FREQUENCY',0)
        freqtab.close()
        timepersample = None
        ant1_list        = []
        ant2_list        = []
        all_time_list    = []
        all_data_list    = []
        all_weights_list = []
        all_flags_list   = []
        for msfile in ms_names:
            if not os.path.exists(msfile):
                print("Cannot find MS file: {0}.".format(msfile))
                sys.exit(1)
            # open input/output MS
            ms = pt.table(msfile, readonly=True, ack=False)
            if check_files:
                freqtab = pt.table(msfile + '::SPECTRAL_WINDOW', ack=False)
                if freqtab.getcell('REF_FREQUENCY',0) != freq:
                    print("Different REF_FREQUENCYs: {0} and: {1} in {2}.".format(freq,freqtab.getcell('REF_FREQUENCY',0),msfile))
                    sys.exit(1)
                freqtab.close()
            #wav = 299792458. / freq
            if timepersample is None:
                timepersample = ms.getcell('INTERVAL',0)
            elif check_files:
                if timepersample != ms.getcell('INTERVAL',0):
                    print("Different INTERVALs: {0} and: {1} in {2}.".format(timepersample,ms.getcell('INTERVAL',0),msfile))
                    sys.exit(1)
            all_time_list.append( ms.getcol('TIME_CENTROID') )
            ant1_list.append( ms.getcol('ANTENNA1') )
            ant2_list.append( ms.getcol('ANTENNA2') )
            all_data_list.append( ms.getcol(input_colname) )
            all_weights_list.append( ms.getcol('WEIGHT_SPECTRUM') )
            all_flags_list.append( ms.getcol('FLAG') )

            all_flags_list[-1][ np.isnan(all_data_list[-1]) ] = True # flag NaNs
            all_weights_list[-1] = all_weights_list[-1] * ~all_flags_list[-1] # set weight of flagged data to 0

            # Check that all NaNs are flagged
            if np.count_nonzero(np.isnan(all_data_list[-1][~all_flags_list[-1]])) > 0:
                logging.error('NaNs in unflagged data in {0}!'.format(msfile))
                sys.exit(1)

        ### iteration on baseline combination
        for ant in itertools.product(set(ant1_list[0]), set(ant2_list[0])):
            if ant[0] >= ant[1]:
                continue
            sel_list = []
            weights_list = []
            data_list = []
            # select data from all MSs
            for msindex in range(len(ms_names)):
                sel1 = np.where(ant1_list[msindex] == ant[0])[0]
                sel2 = np.where(ant2_list[msindex] == ant[1])[0]
                sel_list.append( sorted(list(frozenset(sel1).intersection(sel2))) )

                # # get weights and data
                # weights_list.append( all_weights[sel_list[msindex],:,:] )
                # data_list.append( all_data[sel_list[msindex],:,:] )
            # combine data and weights into one array
            data = all_data_list[0][sel_list[0],:,:]
            weights = all_weights_list[0][sel_list[0],:,:]
            fillshape = list(data.shape)
            startidx = [0]
            endidx = [data.shape[0]]
            for msindex in range(1, len(ms_names)):
                #pad gap between obs
                filltimes = np.arange(np.max(all_time_list[msindex-1]),np.min(all_time_list[msindex]),timepersample)
                fillshape[0] = len(filltimes)
                data = np.concatenate( (data,np.zeros(fillshape)), axis=0 )
                weights = np.concatenate( (weights,np.zeros(fillshape)), axis=0  )
                startidx.append(data.shape[0])
                data = np.concatenate( (data,all_data_list[msindex][sel_list[msindex],:,:]), axis=0  )
                weights = np.concatenate( (weights,all_weights_list[msindex][sel_list[msindex],:,:]), axis=0  )
                endidx.append(data.shape[0])

            # compute the Gaussian kernel width (sigma)
            dist = baseline_dict['{0}-{1}'.format(ant[0], ant[1])]
            stddev = 30.0 * ionfactor * np.sqrt((25.0 / dist)) * (freq / 60.e6) # in sec
            stddev = stddev/timepersample # in samples

            #    Multiply every element of the data by the weights, convolve both
            #    the scaled data and the weights, and then divide the convolved data
            #    by the convolved weights (translating flagged data into weight=0).
            #    That's basically the equivalent of a running weighted average with
            #    a Gaussian window function.

            # weight data and set bad data to 0 so NaNs do not propagate
            data = np.nan_to_num(data*weights)

            # smear weighted data and weights
            dataR = gfilter(np.real(data), stddev, axis=0)#, truncate=4.)
            dataI = gfilter(np.imag(data), stddev, axis=0)#, truncate=4.)
            weights = gfilter(weights, stddev, axis=0)#, truncate=4.)

            # re-create data
            data = (dataR + 1j * dataI)
            data[(weights != 0)] /= weights[(weights != 0)] # avoid divbyzero
            for msindex in range(len(ms_names)):
                all_data_list[msindex][sel_list[msindex],:,:] = data[startidx[msindex]:endidx[msindex],:,:]
                all_weights_list[msindex][sel_list[msindex],:,:] = weights[startidx[msindex]:endidx[msindex],:,:]

        ### write the data back to the files
        for msindex in range(len(ms_names)):
            ms = pt.table(ms_names[msindex], readonly=False, ack=False)
            # Add the output columns if needed
            if output_colname not in ms.colnames():
                desc = ms.getcoldesc(input_colname)
                desc['name'] = output_colname
                ms.addcols(desc)

            ms.putcol(output_colname, all_data_list[msindex])
            ms.putcol('FLAG', all_flags_list[msindex]) # this saves flags of nans, which is always good
            ms.putcol('WEIGHT_SPECTRUM', all_weights_list[msindex])
            ms.close()
        print "BLavg_multi: Finished one group of measurement sets."
Example #9
uwind_now = numpy.array(uwind_now)
vwind_now = numpy.array(vwind_now)
hght_now = numpy.array(hght_now)
temp_now = numpy.array(temp_now)
uwind_fcst = numpy.array(uwind_fcst)
vwind_fcst = numpy.array(vwind_fcst)
hght_fcst = numpy.array(hght_fcst)
temp_fcst = numpy.array(temp_fcst)

# check the shapes of everything
shapeCheck(uwind_now, vwind_now, hght_now, temp_now, lats, lons)
shapeCheck(uwind_fcst, vwind_fcst, hght_fcst, temp_fcst, lats, lons)

# run the base variables through a gaussian filter to smooth the output
for i in range(uwind_now.shape[0]):
    uwind_now[i, :, :] = gfilter(uwind_now[i, :, :], sigma)
    vwind_now[i, :, :] = gfilter(vwind_now[i, :, :], sigma)
    hght_now[i, :, :] = gfilter(hght_now[i, :, :], sigma)
    temp_now[i, :, :] = gfilter(temp_now[i, :, :], sigma)
    uwind_fcst[i, :, :] = gfilter(uwind_fcst[i, :, :], sigma)
    vwind_fcst[i, :, :] = gfilter(vwind_fcst[i, :, :], sigma)
    hght_fcst[i, :, :] = gfilter(hght_fcst[i, :, :], sigma)
    temp_fcst[i, :, :] = gfilter(temp_fcst[i, :, :], sigma)

# get derived variables at each pressure level
deformation_now, convergence_now = windProducts(lons, lats, uwind_now,
                                                vwind_now)
deformation_fcst, convergence_fcst = windProducts(lons, lats, uwind_fcst,
                                                  vwind_fcst)
vws_now = windShear(uwind_now, vwind_now, hght_now)
ellrod_now = (vws_now * (deformation_now + convergence_now)) * 1e7
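In these examples `gfilter` is presumably `scipy.ndimage.gaussian_filter` (or `gaussian_filter1d` where an `axis` argument is passed), imported under an alias; `sigma` is then the kernel width in grid points. A minimal sketch of the level-by-level smoothing step above (synthetic field, illustrative only):

import numpy as np
from scipy.ndimage import gaussian_filter as gfilter

sigma = 2.0                            # kernel width in grid points
field = np.random.rand(10, 73, 144)    # (levels, lats, lons), synthetic
for i in range(field.shape[0]):
    field[i, :, :] = gfilter(field[i, :, :], sigma)  # smooth each level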
Example #10
def BLavg(msfile,
          baseline_dict,
          input_colname,
          output_colname,
          ionfactor,
          clobber=True):
    """
    Averages data using a sliding Gaussian kernel on the weights
    """
    if not os.path.exists(msfile):
        print("Cannot find MS file.")
        sys.exit(1)

    # open input/output MS
    ms = pt.table(msfile, readonly=False, ack=False)
    freqtab = pt.table(msfile + '::SPECTRAL_WINDOW', ack=False)
    freq = freqtab.getcell('REF_FREQUENCY', 0)  # scalar, so stddev below stays scalar
    freqtab.close()
    wav = 299792458. / freq
    timepersample = ms.getcell('INTERVAL', 0)
    all_time = ms.getcol('TIME_CENTROID')

    ant1 = ms.getcol('ANTENNA1')
    ant2 = ms.getcol('ANTENNA2')
    all_data = ms.getcol(input_colname)
    all_weights = ms.getcol('WEIGHT_SPECTRUM')
    all_flags = ms.getcol('FLAG')

    all_flags[np.isnan(all_data)] = True  # flag NaNs
    all_weights = all_weights * ~all_flags  # set weight of flagged data to 0

    # Check that all NaNs are flagged
    if np.count_nonzero(np.isnan(all_data[~all_flags])) > 0:
        logging.error('NaNs in unflagged data!')
        sys.exit(1)

    # iteration on baseline combination
    for ant in itertools.product(set(ant1), set(ant2)):

        if ant[0] >= ant[1]:
            continue
        sel1 = np.where(ant1 == ant[0])[0]
        sel2 = np.where(ant2 == ant[1])[0]
        sel = sorted(list(frozenset(sel1).intersection(sel2)))

        # compute the Gaussian kernel width (sigma)
        dist = baseline_dict['{0}-{1}'.format(ant[0], ant[1])]
        stddev = 30.0 * ionfactor * np.sqrt(
            (25.0 / dist)) * (freq / 60.e6)  # in sec
        stddev = stddev / timepersample  # in samples

        #    Multiply every element of the data by the weights, convolve both
        #    the scaled data and the weights, and then divide the convolved data
        #    by the convolved weights (translating flagged data into weight=0).
        #    That's basically the equivalent of a running weighted average with
        #    a Gaussian window function.

        # get weights and data
        weights = all_weights[sel, :, :]
        data = all_data[sel, :, :]

        # set bad data to 0 so nans do not propagate
        data = np.nan_to_num(data * weights)

        # smear weighted data and weights
        dataR = gfilter(np.real(data), stddev, axis=0)  #, truncate=4.)
        dataI = gfilter(np.imag(data), stddev, axis=0)  #, truncate=4.)
        weights = gfilter(weights, stddev, axis=0)  #, truncate=4.)

        # re-create data
        data = (dataR + 1j * dataI)
        data[(weights != 0)] /= weights[(weights != 0)]  # avoid divbyzero
        all_data[sel, :, :] = data
        all_weights[sel, :, :] = weights

    # Add the output columns if needed
    if output_colname not in ms.colnames():
        desc = ms.getcoldesc(input_colname)
        desc['name'] = output_colname
        ms.addcols(desc)

    ms.putcol(output_colname, all_data)
    ms.putcol('FLAG',
              all_flags)  # this saves flags of nans, which is always good
    ms.putcol('WEIGHT_SPECTRUM', all_weights)
    ms.close()
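For reference, the smoothing width used above scales with baseline length and frequency, and the variable is a Gaussian sigma; the kernel's full width at half maximum would be FWHM = 2*sqrt(2*ln 2)*sigma, about 2.355*sigma. A toy evaluation of the formula (all numbers illustrative; the unit of `dist` relative to the 25.0 constant is an assumption here):

import numpy as np

ionfactor = 0.5                  # user-tunable ionospheric scaling
dist = 25.0                      # baseline length, same units as the 25.0 constant
freq = 60.0e6                    # Hz
timepersample = 4.0              # s

stddev_sec = 30.0 * ionfactor * np.sqrt(25.0 / dist) * (freq / 60.0e6)
stddev_samples = stddev_sec / timepersample
fwhm_samples = 2.0 * np.sqrt(2.0 * np.log(2.0)) * stddev_samples

print(stddev_sec, stddev_samples, fwhm_samples)  # 15.0 3.75 ~8.83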