Example no. 1
def nothing(noth):
    # If requested, remove the time gradient from all channels.
    if remove_slope:
        un_mask = sp.logical_not(ma.getmaskarray(NoiseData.data))
        NoiseData.calc_time()
        time = NoiseData.time
        n_time = len(time)
        # Test if the mask is the same for all slices.  If it is, that greatly
        # reduces the work as we only have to generate one set of polynomials.
        all_masks_same = True
        for jj in range(n_time):
            if sp.all(un_mask[jj,...] == un_mask[jj,0,0,0]):
                continue
            else:
                all_masks_same = False
                break
        if all_masks_same:
            polys = misc.ortho_poly(time, 2, un_mask[:,0,0,0], 0)
            polys.shape = (2, len(time), 1, 1, 1)
        else:
            polys = misc.ortho_poly(time[:,None,None,None], 2, un_mask, 0)
        # Subtract the slope mode (1st mode) out of the NoiseData.
        slope_amps = sp.sum(polys[1,...] * un_mask * NoiseData.data.filled(0),
                            0)
        NoiseData.data -= polys[1,...] * slope_amps
    # Iteratively flag on a sliding scale to get closer and closer to the
    # desired threshold.
    n_time = Data.data.shape[0]
    max_thres = sp.sqrt(n_time)/2.
    n_iter = 3
    thresholds = (max_thres ** (n_iter - 1 - sp.arange(n_iter))
                 * thres ** sp.arange(n_iter)) ** (1./(n_iter - 1))
    for threshold in thresholds:
        # Get the deviation from the mean.
        residuals = ma.anom(NoiseData.data, 0).filled(0)
        # Get indices above the threshold.
        mask = abs(residuals) > threshold * ma.std(NoiseData.data, 0)
        # Mask the data.
        Data.data[mask] = ma.masked
        NoiseData.data[mask] = ma.masked
    
    # Now flag for very noisy channels.
    if max_noise_factor > 0:
        vars = ma.var(NoiseData.data, 0)
        mean_vars = ma.mean(vars, -1).filled(0)
        bad_chans = vars.filled(0) > max_noise_factor * mean_vars[:,:,None]
        Data.data[:,bad_chans] = ma.masked
        NoiseData.data[:,bad_chans] = ma.masked
Example no. 2
def nothing(noth):
    # If requested, remove the time gradient from all channels.
    if remove_slope:
        un_mask = sp.logical_not(ma.getmaskarray(NoiseData.data))
        NoiseData.calc_time()
        time = NoiseData.time
        n_time = len(time)
        # Test if the mask is the same for all slices.  If it is, that greatly
        # reduces the work as we only have to generate one set of polynomials.
        all_masks_same = True
        for jj in range(n_time):
            if sp.all(un_mask[jj, ...] == un_mask[jj, 0, 0, 0]):
                continue
            else:
                all_masks_same = False
                break
        if all_masks_same:
            polys = misc.ortho_poly(time, 2, un_mask[:, 0, 0, 0], 0)
            polys.shape = (2, len(time), 1, 1, 1)
        else:
            polys = misc.ortho_poly(time[:, None, None, None], 2, un_mask, 0)
        # Subtract the slope mode (1st mode) out of the NoiseData.
        slope_amps = sp.sum(polys[1, ...] * un_mask * NoiseData.data.filled(0),
                            0)
        NoiseData.data -= polys[1, ...] * slope_amps
    # Iteratively flag on a sliding scale to get closer and closer to the
    # desired threshold.
    n_time = Data.data.shape[0]
    max_thres = sp.sqrt(n_time) / 2.
    n_iter = 3
    thresholds = (max_thres**(n_iter - 1 - sp.arange(n_iter)) *
                  thres**sp.arange(n_iter))**(1. / (n_iter - 1))
    for threshold in thresholds:
        # Get the deviation from the mean.
        residuals = ma.anom(NoiseData.data, 0).filled(0)
        # Get indices above the threshold.
        mask = abs(residuals) > threshold * ma.std(NoiseData.data, 0)
        # Mask the data.
        Data.data[mask] = ma.masked
        NoiseData.data[mask] = ma.masked

    # Now flag for very noisy channels.
    if max_noise_factor > 0:
        vars = ma.var(NoiseData.data, 0)
        mean_vars = ma.mean(vars, -1).filled(0)
        bad_chans = vars.filled(0) > max_noise_factor * mean_vars[:, :, None]
        Data.data[:, bad_chans] = ma.masked
        NoiseData.data[:, bad_chans] = ma.masked
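Both examples above walk a geometric ladder of thresholds, starting from a very loose cut of sqrt(n_time)/2 and ending at the target `thres`, so that early iterations only remove gross outliers that would otherwise corrupt the variance estimate. A minimal sketch of just that schedule, with made-up values for n_time and thres and numpy standing in for the module's scipy alias:

import numpy as np

n_time = 2048        # hypothetical number of time samples
thres = 3.0          # hypothetical target threshold, in units of sigma
n_iter = 3

max_thres = np.sqrt(n_time) / 2.
# Geometric ladder from max_thres down to thres; consecutive entries
# differ by the constant ratio (thres / max_thres) ** (1. / (n_iter - 1)).
thresholds = (max_thres ** (n_iter - 1 - np.arange(n_iter))
              * thres ** np.arange(n_iter)) ** (1. / (n_iter - 1))
print(thresholds)    # [22.627...  8.238...  3.0]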
Example no. 3
def get_chan_fit_data(pars, Blocks, chan_ind):
    chan_data = preprocess_blocks(Blocks, chan_ind)
    source, width, beam, scans = unpack_parameters(pars)
    # Make the beam object.
    Beam = pol_beam.SimpleBeam()
    Beam.set_width(width)
    Beam.set_coefficients(beam)
    # Initialize the data, model, and weight arrays.
    data = np.empty((4, g_n_cal, g_n_time), dtype=np.float64)
    model = np.zeros((4, g_n_cal, g_n_time), dtype=np.float64)
    weight = np.empty((4, g_n_cal, g_n_time), dtype=np.float64)
    ra_source = source[0]
    dec_source = source[1]
    # Loop through the blocks and fill in the data, model and weight arrays.
    time_ind = 0
    for jj in range(g_n_blocks):
        block_data = chan_data[jj]
        n_t = block_data['data'].shape[-1]
        data[:, :, time_ind:time_ind + n_t] = block_data['data']
        weight[:, :, time_ind:time_ind + n_t] = block_data['weight']
        # Add mean, slope and other scan components.
        time = block_data['time']
        polys = misc.ortho_poly(time, g_n_scan_comp)
        # Renormalize the 0th polynomial such that the amplitude is interpreted
        # as the system temperature.
        polys[0, :] = 1
        for kk in range(g_n_scan_comp):
            model[:, :, time_ind:time_ind +
                  n_t] += (scans[jj, :, :, kk, None] * polys[kk, :])
        # Calculate the beam model.
        ra = block_data['ra']
        dec = block_data['dec']
        # XXX
        # ra_factor = np.cos(dec_source * np.pi / 180)
        ra_factor = 1
        beam_model = Beam.get_slice((ra_source - ra) * ra_factor,
                                    dec_source - dec)
        model[:, :, time_ind:time_ind + n_t] += beam_model[0, :, None, :]
        # Noise weight.
        time_ind += n_t
    return data, model, weight
Example no. 4
def get_chan_fit_data(pars, Blocks, chan_ind):
    chan_data = preprocess_blocks(Blocks, chan_ind)
    source, width, beam, scans = unpack_parameters(pars)
    # Make the beam object.
    Beam = pol_beam.SimpleBeam()
    Beam.set_width(width)
    Beam.set_coefficients(beam)
    # Initialize the data, model, and weight arrays.
    data = np.empty((4, g_n_cal, g_n_time), dtype=np.float64)
    model = np.zeros((4, g_n_cal, g_n_time), dtype=np.float64)
    weight = np.empty((4, g_n_cal, g_n_time), dtype=np.float64)
    ra_source = source[0]
    dec_source = source[1]
    # Loop through the blocks and fill in the data, model and weight arrays.
    time_ind = 0
    for jj in range(g_n_blocks):
        block_data = chan_data[jj]
        n_t = block_data["data"].shape[-1]
        data[:, :, time_ind : time_ind + n_t] = block_data["data"]
        weight[:, :, time_ind : time_ind + n_t] = block_data["weight"]
        # Add mean, slope and other scan components.
        time = block_data["time"]
        polys = misc.ortho_poly(time, g_n_scan_comp)
        # Renormalize the 0th polynomial such that the amplitude is interpreted
        # as the system temperature.
        polys[0, :] = 1
        for kk in range(g_n_scan_comp):
            model[:, :, time_ind : time_ind + n_t] += scans[jj, :, :, kk, None] * polys[kk, :]
        # Calculate the beam model.
        ra = block_data["ra"]
        dec = block_data["dec"]
        # XXX
        # ra_factor = np.cos(dec_source * np.pi / 180)
        ra_factor = 1
        beam_model = Beam.get_slice((ra_source - ra) * ra_factor, dec_source - dec)
        model[:, :, time_ind : time_ind + n_t] += beam_model[0, :, None, :]
        # Noise weight.
        time_ind += n_t
    return data, model, weight
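The scan-component loop above relies on numpy broadcasting: an amplitude slice of shape (4, n_cal, 1) times a polynomial of shape (n_t,) expands to a (4, n_cal, n_t) model block. A self-contained sketch of that accumulation with made-up shapes; none of these names come from the module:

import numpy as np

n_pol, n_cal, n_t, n_comp = 4, 2, 100, 3
scans = np.random.randn(n_pol, n_cal, n_comp)   # per-scan amplitudes
polys = np.random.randn(n_comp, n_t)            # basis polynomials

model = np.zeros((n_pol, n_cal, n_t))
for kk in range(n_comp):
    # (n_pol, n_cal, 1) * (n_t,) broadcasts to (n_pol, n_cal, n_t).
    model += scans[:, :, kk, None] * polys[kk, :]

# The loop is equivalent to a single tensor contraction.
assert np.allclose(model, np.einsum('pck,kt->pct', scans, polys))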
Example no. 5
    def generate_scan_basis_polys(self, order):
        """Generate basis time polynomials for each scan up to order.
        
        Generate basis polynomials as a function of time for each scan 
        up to the passed maximum order.

        Parameters
        ----------
        order : integer
            Maximum order of the polynomials.

        Returns
        -------
        polys : ndarray of shape (n_scan, `order`, n_time)
        """

        polys = np.zeros((self.n_scan, order, self.n_time), dtype=np.float64)
        scan_ind = 0
        for data, start, end in self.iterate_scans():
            time = data["time"]
            this_poly = misc.ortho_poly(time, order)
            polys[scan_ind, :, start:end] = this_poly
            scan_ind += 1
        return polys
Example no. 6
    def generate_scan_basis_polys(self, order):
        """Generate basis time polynomials for each scan up to order.
        
        Generate basis polynomials as a function of time for each scan 
        up to the passed maximum order.

        Parameters
        ----------
        order : integer
            Maximum order of the polynomials.

        Returns
        -------
        polys : ndarray of shape (n_scan, `order`, n_time)
        """

        polys = np.zeros((self.n_scan, order, self.n_time), dtype=np.float64)
        scan_ind = 0
        for data, start, end in self.iterate_scans():
            time = data['time']
            this_poly = misc.ortho_poly(time, order)
            polys[scan_ind, :, start:end] = this_poly
            scan_ind += 1
        return polys
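All of these examples assume the same contract for misc.ortho_poly: given a time axis and an order, it returns an array of shape (order, n_time) whose rows are polynomials in time, orthonormal over the sample points (the calls with extra arguments also pass a mask and an axis). A rough Gram-Schmidt sketch of that contract, for illustration only; the real misc.ortho_poly may differ in normalization and in how it handles masks:

import numpy as np

def ortho_poly_sketch(t, order):
    # Orthonormalize the monomials 1, t, t**2, ... over the sample
    # points using modified Gram-Schmidt.
    polys = np.empty((order, len(t)))
    for m in range(order):
        p = t ** m
        for k in range(m):
            p = p - np.dot(p, polys[k]) * polys[k]
        polys[m] = p / np.sqrt(np.dot(p, p))
    return polys

t = np.linspace(0., 1., 50)
polys = ortho_poly_sketch(t, 3)
# Rows are orthonormal: the Gram matrix is the identity.
assert np.allclose(np.dot(polys, polys.T), np.eye(3))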
Example no. 7
def flag(Data,
         NoiseData,
         thres=3.0,
         max_noise_factor=-1,
         modes_subtract=1,
         filter_type='edge'):
    """Flags data for outliers using a signal subtracted data set.
    
    Flags outliers in time stream data by looking at a version of the data
    that has had the signal subtracted out of it.  Each frequency channel,
    polarization and cal state are treated separately.

    Parameters
    ----------
    Data : DataBlock Object
        Data to be flagged.  Upon exit, this object will have new flags.
    NoiseData : DataBlock Object
        Version of `Data` with the signal subtracted.
    thres : float
        Threshold for flagging in units of sigma (default is 3.0).
    max_noise_factor : float
        If positive, whole channels whose variance exceeds this factor times
        the mean channel variance are also flagged (default -1, disabled).
    modes_subtract : int
        How many modes to remove for high pass filtering.
    filter_type : {'edge', 'gaussian', 'gaussian/edge'}
        Type of high pass filtering to use.
    """

    # Get the mask and the data as normal arrays.
    # Copy seems to be necessary if the mask is None.
    data = NoiseData.data.filled(0).copy()
    mask = ma.getmaskarray(NoiseData.data)
    ## High pass filter the data to make outliers stand out.
    un_mask = sp.logical_not(mask)
    NoiseData.calc_time()
    time = NoiseData.time
    n_time = len(time)
    # How many basis polynomials we need and what fraction of each mode
    # gets subtracted out.
    if filter_type == 'edge':
        n_polys = modes_subtract
        subtract_weights = sp.ones(n_polys)
    elif filter_type == 'gaussian' or filter_type == 'gaussian/edge':
        n_polys = 4 * modes_subtract
        subtract_weights = sp.exp(
            -(sp.arange(n_polys, dtype=float) / modes_subtract)**2 / 2.)
        if filter_type == 'gaussian/edge':
            subtract_weights[0:2] = 1.
    # Test if the mask is the same for all slices.  If it is, that greatly
    # reduces the work as we only have to generate one set of polynomials.
    all_masks_same = True
    for jj in range(n_time):
        if sp.all(un_mask[jj, ...] == un_mask[jj, 0, 0, 0]):
            continue
        else:
            all_masks_same = False
            break
    if all_masks_same:
        polys = misc.ortho_poly(time, n_polys, un_mask[:, 0, 0, 0], 0)
        polys.shape = (n_polys, len(time), 1, 1, 1)
    else:
        polys = misc.ortho_poly(time[:, None, None, None], n_polys, un_mask, 0)
    # Subtract the weighted low-order modes out of the data.
    amps = sp.sum(data * un_mask * polys, 1)
    amps *= subtract_weights[:, None, None, None]
    data -= sp.sum(amps[:, None, :, :, :] * un_mask[None, :, :, :, :] * polys,
                   0)
    ## Do the main outlier flagging.
    # Iteratively flag on a sliding scale to get closer and closer to the
    # desired threshold.
    max_thres = sp.sqrt(n_time) / 2.
    n_iter = 3
    thresholds = (max_thres**(n_iter - 1 - sp.arange(n_iter)) *
                  thres**sp.arange(n_iter))**(1. / (n_iter - 1))
    for threshold in thresholds:
        # Subtract the mean from every channel.
        this_data = masked_subtract_mean(data, mask, 0)
        # Calculate the variance.
        un_mask = sp.logical_not(mask)
        counts = sp.sum(un_mask, 0)
        counts[counts == 0] = 1
        std = sp.sqrt(sp.sum(this_data**2 * un_mask, 0) / counts)
        bad_inds = abs(this_data) > threshold * std
        # If any polarization or cal state is masked, they all should be.
        bad_inds = sp.any(sp.any(bad_inds, 1), 1)
        mask[bad_inds[:, None, None, :]] = True
    ## Now look for times with excursions in the frequency average
    # (achromatic outliers).
    # Compute the frequency mean.
    un_mask = sp.logical_not(mask)
    counts = sp.sum(un_mask, -1)
    fmean_un_mask = counts >= 1
    counts[counts == 0] = 1
    fmean = sp.sum(data * un_mask, -1) / counts
    # Subtract the time mean.
    fmean = masked_subtract_mean(fmean, sp.logical_not(fmean_un_mask), 0)
    # Get the variance.
    counts = sp.sum(fmean_un_mask, 0)
    counts[counts == 0] = 1
    fmean_std = sp.sqrt(sp.sum(fmean**2 * fmean_un_mask, 0) / counts)
    # Flag any time that is an outlier (for any polarization or cal state).
    bad_times = sp.any(sp.any(abs(fmean) > thres * fmean_std, 1), 1)
    mask[bad_times, :, :, :] = True
    ## Flag for very noisy channels.
    if max_noise_factor > 0:
        # Do this a few times to make sure we get everything.
        for ii in range(3):
            this_data = masked_subtract_mean(data, mask, 0)
            # Compute variance accounting for the mask.
            un_mask = sp.logical_not(mask)
            counts = sp.sum(un_mask, 0)
            vars_un_mask = counts >= 1
            counts[counts == 0] = 1
            vars = sp.sum(this_data**2 * un_mask, 0) / counts
            # Find the mean of the variances.
            counts = sp.sum(vars_un_mask, -1)
            counts[counts == 0] = 1
            mean_vars = sp.sum(vars * vars_un_mask, -1) / counts
            # Find channels that stand out (for any polarization or cal state).
            bad_chans = sp.any(
                sp.any(vars > max_noise_factor * mean_vars[:, :, None], 0), 0)
            mask[:, :, :, bad_chans] = True
    ## Transfer the mask to the DataBlock objects.
    Data.data[mask] = ma.masked
    NoiseData.data[mask] = ma.masked
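flag depends on masked_subtract_mean, which is not included in these excerpts. Judging from the call sites, masked_subtract_mean(data, mask, 0) subtracts the mean of the unmasked samples along the given axis. A plausible numpy sketch of such a helper, not the module's actual implementation:

import numpy as np

def masked_subtract_mean(data, mask, axis):
    # Mean over unmasked entries along `axis`, subtracted from the
    # unmasked data; masked entries are zeroed out.
    un_mask = np.logical_not(mask)
    counts = np.sum(un_mask, axis)
    counts[counts == 0] = 1
    mean = np.sum(data * un_mask, axis) / counts
    return (data - np.expand_dims(mean, axis)) * un_mask

data = np.arange(12, dtype=float).reshape(4, 3)
mask = np.zeros((4, 3), dtype=bool)
mask[0, 1] = True
centred = masked_subtract_mean(data, mask, 0)
# The unmasked entries of each column now average to zero.
print(np.sum(centred, 0))   # -> [0. 0. 0.]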
Example no. 8
def get_correlation(Data, maps, interpolation='nearest', modes_subtract=2,
                    filter_type='edge'):
    "Correlates the maps with the data."
    
    n_pols = Data.dims[1]
    if len(maps) != n_pols:
        raise ValueError("Supplied wrong number of maps.")
    # Get the time array (for slope mode subtraction).
    Data.calc_time()
    time = Data.time
    # Initialize outputs.
    correlation = np.zeros(Data.dims[1:], dtype=float)
    normalization = np.zeros(Data.dims[1:], dtype=float)
    for ii in range(n_pols):
        map = maps[ii]
        if map.shape[0] != Data.dims[3]:
            raise RuntimeError("Map and data frequency axes not the same"
                               " length.")
        Data.calc_pointing()
        Data.calc_freq()
        # Figure out which pointings (times) are inside the map bounds.
        map_ra = map.get_axis('ra')
        map_dec = map.get_axis('dec')
        on_map_inds = np.logical_and(
            np.logical_and(Data.ra >= min(map_ra), Data.ra <= max(map_ra)),
            np.logical_and(Data.dec >= min(map_dec), Data.dec <= max(map_dec)))
        # If none of the scan is on the map, skip.
        if not np.any(on_map_inds):
            continue
        # Convert map to a time domain array.
        submap = np.empty((np.sum(on_map_inds), map.shape[0]), dtype=float)
        kk = 0
        for jj in range(len(on_map_inds)) :
            if on_map_inds[jj] :
                submap[kk,:] = map.slice_interpolate([1, 2], 
                        [Data.ra[jj], Data.dec[jj]], kind=interpolation)
                kk += 1
        n_time_on = kk
        # Set up filter parameters.
        if filter_type == 'edge':
            total_modes = modes_subtract
            subtract_weights = np.ones(total_modes)
        if filter_type == 'gaussian' or filter_type == 'gaussian/edge':
            total_modes = min((4*modes_subtract, n_time_on))
            # Gaussian taper for the filtering.
            subtract_weights = np.exp(-(np.arange(total_modes, dtype=float)
                                      / modes_subtract)**2 / 2.)
            if filter_type == 'gaussian/edge':
                subtract_weights[0:2] = 1.
        # Now get the corresponding data.
        subdata = Data.data[on_map_inds,ii,:,:]
        un_mask = np.logical_not(ma.getmaskarray(subdata))
        subdata = subdata.filled(0)
        # Broadcast map data up to the same shape as the time stream (add cal
        # axis).
        submap = np.zeros_like(subdata) + submap[:,None,:]
        # Get rid of the low frequency, smooth components by subtracting out
        # basis polynomials.
        # Generate basis polynomials that are orthonormal given the mask.
        on_map_time = time[on_map_inds]
        # Test if the mask is the same for all slices.  If it is, that greatly
        # reduces the work as we only have to generate one set.
        all_masks_same = True
        for jj in range(n_time_on):
            if np.all(un_mask[jj,...] == un_mask[jj,0,0]):
                continue
            else:
                all_masks_same = False
                break
        if all_masks_same:
            polys = misc.ortho_poly(on_map_time, total_modes,
                                    un_mask[:,0,0], 0)
            polys.shape = (total_modes, n_time_on, 1, 1)
        else:
            polys = misc.ortho_poly(on_map_time[:,None,None], total_modes,
                                    un_mask, 0)
        # Subtract out of the data.
        mags_data = np.sum(subdata * un_mask * polys, 1)
        # If using a taper, add that in.
        mags_data *= subtract_weights[:,None,None]
        to_subtract_data = np.sum(mags_data[:,None,...] * polys, 0)
        subdata -= to_subtract_data
        # Subtract out of the map.
        mags_map = np.sum(submap * un_mask * polys, 1)
        mags_map *= subtract_weights[:,None,None]
        to_subtract_map = np.sum(mags_map[:,None,...] * polys, 0)
        submap -= to_subtract_map
        # Calculate the correlation and the normalization.
        corr = np.sum(submap * un_mask * subdata, 0)
        norm = np.sum(submap * un_mask * submap, 0)
        # Calculate inverse reduced chi-squared and weight this measurement by
        # it.  Be careful about the zero-information case (all masked).
        filled_norm = norm.copy()
        # No information.
        bad_inds = np.logical_or(norm == 0, np.sum(un_mask, 0) <
                                 2 * modes_subtract)
        filled_norm[bad_inds] = 1
        amp = corr / filled_norm
        fit = submap * amp
        inv_chi_sq = np.sum((subdata - fit)**2 * un_mask, 0)
        inv_chi_sq[bad_inds] = 1.
        inv_chi_sq = np.sum(un_mask, 0) / inv_chi_sq
        inv_chi_sq[bad_inds] = 0
        corr *= inv_chi_sq
        norm *= inv_chi_sq
        # Store results in output arrays
        correlation[ii,:,:] = corr
        normalization[ii,:,:] = norm
        if False and (ii == 0) and int(Data.field['SCAN']) == 86:
            print(correlation[ii,:,:])
            print(normalization[ii,:,:])
            print(inv_chi_sq)
            print(correlation[ii,:,:] / normalization[ii,:,:])
            plt.plot(on_map_time, (subdata * un_mask)[:,0,0], '.')
            plt.plot(on_map_time, (fit * un_mask)[:,0,0], '.')
            #plt.plot((subdata * un_mask)[:,0,0] - (submap * un_mask)[:,0,0],
            #           '.')
            #plt.plot((subdata * un_mask)[:,0,3] - (submap * un_mask)[:,0,3],
            #           '.')
            plt.show()
    return correlation, normalization
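The corr / norm ratio computed above is the weighted least-squares amplitude of the map template: minimizing sum(w * (data - amp * map)**2) over amp gives amp = sum(w * map * data) / sum(w * map**2). A quick numeric check of that identity on synthetic data, illustrative only:

import numpy as np

rng = np.random.RandomState(0)
template = rng.randn(100)            # stand-in for the map time stream
weight = rng.rand(100) > 0.2         # stand-in for the un_mask weights
data = 2.5 * template + 0.1 * rng.randn(100)

corr = np.sum(template * weight * data)
norm = np.sum(template * weight * template)
amp = corr / norm                    # closed-form weighted LS amplitude

# Same answer from a generic least-squares solve.
A = (template * np.sqrt(weight))[:, None]
b = data * np.sqrt(weight)
amp_lstsq = np.linalg.lstsq(A, b, rcond=None)[0][0]
assert np.allclose(amp, amp_lstsq)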
Example no. 9
def flag(Data, NoiseData, thres=3.0, max_noise_factor=-1, modes_subtract=1,
         filter_type='edge'):
    """Flags data for outliers using a signal subtracted data set.
    
    Flags outliers in time stream data by looking at a version of the data
    that has had the signal subtracted out of it.  Each frequency channel,
    polarization and cal state are treated separately.

    Parameters
    ----------
    Data : DataBlock Object
        Data to be flagged.  Upon exit, this object will have new flags.
    NoiseData : DataBlock Object
        Version of `Data` with the signal subtracted.
    thres : float
        Threshold for flagging in units of sigma (default is 3.0).
    max_noise_factor : float
        If positive, whole channels whose variance exceeds this factor times
        the mean channel variance are also flagged (default -1, disabled).
    modes_subtract : int
        How many modes to remove for high pass filtering.
    filter_type : {'edge', 'gaussian', 'gaussian/edge'}
        Type of high pass filtering to use.
    """
    
    # Get the mask and the data as normal arrays.
    # Copy seems to be necessary if the mask is None.
    data = NoiseData.data.filled(0).copy()
    mask = ma.getmaskarray(NoiseData.data)
    ## High pass filter the data to make outliers stand out.
    un_mask = sp.logical_not(mask)
    NoiseData.calc_time()
    time = NoiseData.time
    n_time = len(time)
    # How many basis polynomials we need and what fraction of each mode
    # gets subtracted out.
    if filter_type == 'edge':
        n_polys = modes_subtract
        subtract_weights = sp.ones(n_polys)
    elif filter_type == 'gaussian' or filter_type == 'gaussian/edge':
        n_polys = 4 * modes_subtract
        subtract_weights = sp.exp(-(sp.arange(n_polys, dtype=float)
                                     / modes_subtract)**2 / 2.)
        if filter_type == 'gaussian/edge':
            subtract_weights[0:2] = 1.
    # Test if the mask is the same for all slices.  If it is, that greatly
    # reduces the work as we only have to generate one set of polynomials.
    all_masks_same = True
    for jj in range(n_time):
        if sp.all(un_mask[jj,...] == un_mask[jj,0,0,0]):
            continue
        else:
            all_masks_same = False
            break
    if all_masks_same:
        polys = misc.ortho_poly(time, n_polys, un_mask[:,0,0,0], 0)
        polys.shape = (n_polys, len(time), 1, 1, 1)
    else:
        polys = misc.ortho_poly(time[:,None,None,None], n_polys, un_mask, 0)
    # Subtract the weighted low-order modes out of the data.
    amps = sp.sum(data * un_mask * polys, 1)
    amps *= subtract_weights[:,None,None,None]
    data -= sp.sum(amps[:,None,:,:,:] * un_mask[None,:,:,:,:] * polys, 0)
    ## Do the main outlier flagging.
    # Iteratively flag on a sliding scale to get closer and closer to the
    # desired threshold.
    max_thres = sp.sqrt(n_time)/2.
    n_iter = 3
    thresholds = (max_thres ** (n_iter - 1 - sp.arange(n_iter))
                 * thres ** sp.arange(n_iter)) ** (1./(n_iter - 1))
    for threshold in thresholds:
        # Subtract the mean from every channel.
        this_data = masked_subtract_mean(data, mask, 0)
        # Calculate the variance.
        un_mask = sp.logical_not(mask)
        counts = sp.sum(un_mask, 0)
        counts[counts == 0] = 1
        std = sp.sqrt(sp.sum(this_data**2 * un_mask, 0) / counts)
        bad_inds = abs(this_data) > threshold * std
        # If any polarization or cal state is masked, they all should be.
        bad_inds = sp.any(sp.any(bad_inds, 1), 1)
        mask[bad_inds[:,None,None,:]] = True
    ## Now look for times with excursions in the frequency average
    # (achromatic outliers).
    # Compute the frequency mean.
    un_mask = sp.logical_not(mask)
    counts = sp.sum(un_mask, -1)
    fmean_un_mask = counts >= 1
    counts[counts == 0] = 1
    fmean = sp.sum(data * un_mask, -1) / counts
    # Subtract the time mean.
    fmean = masked_subtract_mean(fmean, sp.logical_not(fmean_un_mask), 0)
    # Get the variance.
    counts = sp.sum(fmean_un_mask, 0)
    counts[counts == 0] = 1
    fmean_std = sp.sqrt(sp.sum(fmean**2 * fmean_un_mask, 0) / counts)
    # Flag any time that is an outlier (for any polarization or cal state).
    bad_times = sp.any(sp.any(abs(fmean) > thres * fmean_std, 1), 1)
    mask[bad_times,:,:,:] = True
    ## Flag for very noisy channels.
    if max_noise_factor > 0:
        # Do this a few times to make sure we get everything.
        for ii in range(3):
            this_data = masked_subtract_mean(data, mask, 0)
            # Compute variance accounting for the mask.
            un_mask = sp.logical_not(mask)
            counts = sp.sum(un_mask, 0)
            vars_un_mask = counts >= 1
            counts[counts == 0] = 1
            vars = sp.sum(this_data**2 * un_mask, 0) / counts
            # Find the mean of the variances.
            counts = sp.sum(vars_un_mask, -1)
            counts[counts == 0] = 1
            mean_vars = sp.sum(vars * vars_un_mask, -1) / counts
            # Find channels that stand out (for any polarization or cal state).
            bad_chans = sp.any(sp.any(vars > max_noise_factor *
                                      mean_vars[:,:,None], 0), 0)
            mask[:,:,:,bad_chans] = True
    ## Transfer the mask to the DataBlock objects.
    Data.data[mask] = ma.masked
    NoiseData.data[mask] = ma.masked
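The max_noise_factor branch of flag (Examples 7 and 9) flags whole channels whose time variance stands out against the channel-averaged variance. A self-contained toy version of that test on random data; the shapes and names here are made up for the demo, and the real code averages the variances over frequency per polarization and cal state rather than over all channels:

import numpy as np

rng = np.random.RandomState(1)
n_time, n_chan = 500, 32
data = rng.randn(n_time, n_chan)
data[:, 7] *= 10.                     # plant one noisy channel
mask = np.zeros((n_time, n_chan), dtype=bool)

max_noise_factor = 3.0
un_mask = np.logical_not(mask)
counts = np.sum(un_mask, 0)
counts[counts == 0] = 1
# Per-channel variance over unmasked times (data is already mean zero).
chan_vars = np.sum(data ** 2 * un_mask, 0) / counts
mean_var = np.mean(chan_vars)
bad_chans = chan_vars > max_noise_factor * mean_var
mask[:, bad_chans] = True
print(np.where(bad_chans)[0])         # -> [7]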
Example no. 10
def make_masked_time_stream(Blocks, ntime=None, window=None, 
                            return_means=False, subtract_slope=False) :
    """Converts Data Blocks into a single uniformly sampled time stream.
    
    Also produces the mask giving whether elements are valid entries or came
    from a zero pad.  This produces the required inputs for calculating a
    windowed power spectrum.

    Parameters
    ----------
    Blocks : tuple of DataBlock objects.
    ntime : int
        Total number of time bins in output arrays.  If shorter than required,
        extra data is truncated.  If longer, extra data is masked.  Default is
        to use exactly the number that fits all the data.  Set to a negative
        factor to zero pad to a power of 2 and by at least that factor.
    window : string or tuple
        Type of window to apply to each DataBlock.  Valid options are the valid
        arguments to scipy.signal.get_window().  By default, don't window.
    return_means : bool
        Whether to return an array of the channel means.
    subtract_slope : bool
        Whether to subtract a linear function of time from each channel.

    Returns
    -------
    time_stream : array
        All the data in `Blocks` but concatenated along the time axis and
        padded with zeros such that the time axis is uniformly sampled and
        uninterrupted.
    mask : array same shape as `time_stream`
        1.0 if data in the corresponding `time_stream` element is filled
        and 0 if the data was missing.  This is like a window where 
        time_stream = mask*real_data.
    dt : float
        The time step of the returned time stream.
    means : array (optional)
        The mean from each channel.
    """

    # Shape of all axes except the time axis.
    back_shape = Blocks[0].dims[1:]
    # Get the time sample spacing.
    Blocks[0].calc_time()
    dt = abs(sp.mean(sp.diff(Blocks[0].time)))
    # Find the beginning and the end of the time axis by looping through
    # blocks.
    # Also get the time axis and the mask
    # for calculating basis polynomials.
    unmask = sp.zeros((0,) + back_shape, dtype=bool)
    time = sp.zeros((0,), dtype=float)
    start_ind = []
    min_time = float('inf')
    max_time = 0.0
    #mean_time = 0.0
    #n_data_times = 0
    for Data in Blocks :
        Data.calc_time()
        start_ind.append(len(time))
        time = sp.concatenate((time, Data.time))
        this_unmask = sp.logical_not(ma.getmaskarray(Data.data))
        unmask = sp.concatenate((unmask, this_unmask), 0)
        # Often the start or the end of a scan is completely masked.  Make sure
        # we don't start till the first unmasked time and end at the last
        # unmasked time.
        time_unmask = sp.alltrue(ma.getmaskarray(Data.data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        min_time = min(min_time, min(Data.time[time_unmask]))
        max_time = max(max_time, max(Data.time[time_unmask]))
        #mean_time += sp.sum(Data.time[time_unmask])
        #n_data_times += len(Data.time[time_unmask])
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)
                and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,
                                rtol=0.001)) :
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape :
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
    # Now calculate basis polynomials for the mean mode and the slope mode.
    polys = misc.ortho_poly(time[:,None,None,None], 2, unmask, 0)
    #mean_time /= n_data_times
    #if n_data_times == 0:
    #    n_data_times = 1
    # Very important to subtract the mean out of the signal, otherwise the
    # window coupling to the mean (0) mode will dominate everything. Can also
    # optionally take out a slope.
    # Old algorithm.
    #total_sum = 0.0
    #total_counts = 0
    #total_slope = 0.0
    #time_norm = 0.0
    #for Data in Blocks:
    #    total_sum += sp.sum(Data.data.filled(0), 0)
    #    total_counts += ma.count(Data.data, 0)
    #    total_slope += sp.sum(Data.data.filled(0) 
    #                          * (Data.time[:,None,None,None] - mean_time), 0)
    #    time_norm += sp.sum(sp.logical_not(ma.getmaskarray(Data.data))
    #                        * (Data.time[:,None,None,None] - mean_time)**2, 0)
    #total_counts[total_counts == 0] = 1
    #time_norm[time_norm == 0.0] = 1
    #total_mean = total_sum / total_counts
    #total_slope /= time_norm
    # New algorithm.
    mean_amp = 0
    slope_amp = 0
    for ii, Data in enumerate(Blocks):
        si = start_ind[ii]
        this_nt = Data.dims[0]
        data = Data.data.filled(0)
        mean_amp += sp.sum(data * unmask[si:si + this_nt,...]
                           * polys[0,si:si + this_nt,...], 0)
        slope_amp += sp.sum(data * unmask[si:si + this_nt,...]
                            * polys[1,si:si + this_nt,...], 0)
    polys[0,...] *= mean_amp
    polys[1,...] *= slope_amp
    # Calculate the time axis.
    if min_time > max_time:
        min_time = 0
        max_time = 6 * dt
    if not ntime :
        ntime = (max_time - min_time) // dt + 1
    elif ntime < 0:
        # 0 pad by a factor of at least -ntime, but at most 10% more than this.
        time_min = -ntime * (max_time - min_time) / dt
        n_block = 1
        while n_block < time_min/20.0:
            n_block *= 2
        ntime = (time_min//n_block  + 1) * n_block

    time = sp.arange(ntime)*dt + min_time
    # Allocate memory for the outputs.
    time_stream = sp.zeros((ntime,) + back_shape, dtype=float)
    mask = sp.zeros((ntime,) + back_shape, dtype=sp.float32)
    # Loop over all times and fill in the arrays.
    for ii, Data in enumerate(Blocks):
        this_nt = Data.dims[0]
        si = start_ind[ii]
        # Subtract the mean calculated above.
        this_data = Data.data.copy()
        this_data -= polys[0,si:si + this_nt,...]
        # If desired, subtract off the linear function of time.
        if subtract_slope:
            #this_data -= (total_slope 
            #              * (Data.time[:,None,None,None] - mean_time))
            this_data -= polys[1,si:si + this_nt,...]
        # Find the first and last unmasked times.
        time_unmask = sp.alltrue(ma.getmaskarray(this_data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        unmasked_ind, = sp.where(time_unmask)
        first_ind = min(unmasked_ind)
        last_ind = max(unmasked_ind)
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)
                and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,
                                rtol=0.001)) :
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape :
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
        # Apply an offset to the time in case the start of the Data Block
        # doesn't line up with the time array perfectly.
        offset = (time[sp.argmin(abs(time - Data.time[first_ind]))]
                  - Data.time[first_ind])
        # Generate window function.
        if window:
            window_function = sig.get_window(window, last_ind - first_ind + 1)
        # Use a fresh loop variable so the block index `ii` is not shadowed.
        for kk in range(first_ind, last_ind + 1) :
            ind = sp.argmin(abs(time - (Data.time[kk] + offset)))
            if abs(time[ind] - (Data.time[kk])) < 0.5*dt :
                if sp.any(mask[ind, ...]) :
                    msg = "Overlapping times in Data Blocks."
                    raise ce.DataError(msg)
                if window:
                    window_value = window_function[kk - first_ind]
                else :
                    window_value = 1.0
                time_stream[ind, ...] = (window_value 
                                         * this_data[kk, ...].filled(0.0))
                mask[ind, ...] = window_value * sp.logical_not(ma.getmaskarray(
                                     this_data)[kk, ...])
    if return_means:
        return time_stream, mask, dt, polys[0,0,...]
    else :
        return time_stream, mask, dt
Example no. 11
def make_masked_time_stream(Blocks,
                            ntime=None,
                            window=None,
                            return_means=False,
                            subtract_slope=False):
    """Converts Data Blocks into a single uniformly sampled time stream.
    
    Also produces the mask giving whether elements are valid entries or came
    from a zero pad.  This produces the required inputs for calculating a
    windowed power spectrum.

    Parameters
    ----------
    Blocks : tuple of DataBlock objects.
    ntime : int
        Total number of time bins in output arrays.  If shorter than required,
        extra data is truncated.  If longer, extra data is masked.  Default is
        to use exactly the number that fits all the data.  Set to a negative
        factor to zero pad to a power of 2 and by at least that factor.
    window : string or tuple
        Type of window to apply to each DataBlock.  Valid options are the valid
        arguments to scipy.signal.get_window().  By default, don't window.
    return_means : bool
        Whether to return an array of the channel means.
    subtract_slope : bool
        Whether to subtract a linear function of time from each channel.

    Returns
    -------
    time_stream : array
        All the data in `Blocks` but concatenated along the time axis and
        padded with zeros such that the time axis is uniformly sampled and
        uninterrupted.
    mask : array same shape as `time_stream`
        1.0 if data in the corresponding `time_stream` element is filled
        and 0 if the data was missing.  This is like a window where 
        time_stream = mask*real_data.
    dt : float
        The time step of the returned time stream.
    means : array (optional)
        The mean from each channel.
    """

    # Shape of all axes except the time axis.
    back_shape = Blocks[0].dims[1:]
    # Get the time sample spacing.
    Blocks[0].calc_time()
    dt = abs(sp.mean(sp.diff(Blocks[0].time)))
    # Find the beginning and the end of the time axis by looping through
    # blocks.
    # Also get the time axis and the mask
    # for calculating basis polynomials.
    unmask = sp.zeros((0, ) + back_shape, dtype=bool)
    time = sp.zeros((0, ), dtype=float)
    start_ind = []
    min_time = float('inf')
    max_time = 0.0
    #mean_time = 0.0
    #n_data_times = 0
    for Data in Blocks:
        Data.calc_time()
        start_ind.append(len(time))
        time = sp.concatenate((time, Data.time))
        this_unmask = sp.logical_not(ma.getmaskarray(Data.data))
        unmask = sp.concatenate((unmask, this_unmask), 0)
        # Often the start or the end of a scan is completely masked.  Make sure
        # we don't start till the first unmasked time and end at the last
        # unmasked time.
        time_unmask = sp.alltrue(ma.getmaskarray(Data.data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        min_time = min(min_time, min(Data.time[time_unmask]))
        max_time = max(max_time, max(Data.time[time_unmask]))
        #mean_time += sp.sum(Data.time[time_unmask])
        #n_data_times += len(Data.time[time_unmask])
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1) and
                sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt, rtol=0.001)):
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape:
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
    # Now calculate basis polynomials for the mean mode and the slope mode.
    polys = misc.ortho_poly(time[:, None, None, None], 2, unmask, 0)
    #mean_time /= n_data_times
    #if n_data_times == 0:
    #    n_data_times = 1
    # Very important to subtract the mean out of the signal, otherwise the
    # window coupling to the mean (0) mode will dominate everything. Can also
    # optionally take out a slope.
    # Old algorithm.
    #total_sum = 0.0
    #total_counts = 0
    #total_slope = 0.0
    #time_norm = 0.0
    #for Data in Blocks:
    #    total_sum += sp.sum(Data.data.filled(0), 0)
    #    total_counts += ma.count(Data.data, 0)
    #    total_slope += sp.sum(Data.data.filled(0)
    #                          * (Data.time[:,None,None,None] - mean_time), 0)
    #    time_norm += sp.sum(sp.logical_not(ma.getmaskarray(Data.data))
    #                        * (Data.time[:,None,None,None] - mean_time)**2, 0)
    #total_counts[total_counts == 0] = 1
    #time_norm[time_norm == 0.0] = 1
    #total_mean = total_sum / total_counts
    #total_slope /= time_norm
    # New algorithm.
    mean_amp = 0
    slope_amp = 0
    for ii, Data in enumerate(Blocks):
        si = start_ind[ii]
        this_nt = Data.dims[0]
        data = Data.data.filled(0)
        mean_amp += sp.sum(
            data * unmask[si:si + this_nt, ...] *
            polys[0, si:si + this_nt, ...], 0)
        slope_amp += sp.sum(
            data * unmask[si:si + this_nt, ...] *
            polys[1, si:si + this_nt, ...], 0)
    polys[0, ...] *= mean_amp
    polys[1, ...] *= slope_amp
    # Calculate the time axis.
    if min_time > max_time:
        min_time = 0
        max_time = 6 * dt
    if not ntime:
        ntime = (max_time - min_time) // dt + 1
    elif ntime < 0:
        # 0 pad by a factor of at least -ntime, but at most 10% more than this.
        time_min = -ntime * (max_time - min_time) / dt
        n_block = 1
        while n_block < time_min / 20.0:
            n_block *= 2
        ntime = (time_min // n_block + 1) * n_block

    time = sp.arange(ntime) * dt + min_time
    # Allocate memory for the outputs.
    time_stream = sp.zeros((ntime, ) + back_shape, dtype=float)
    mask = sp.zeros((ntime, ) + back_shape, dtype=sp.float32)
    # Loop over all times and fill in the arrays.
    for ii, Data in enumerate(Blocks):
        this_nt = Data.dims[0]
        si = start_ind[ii]
        # Subtract the mean calculated above.
        this_data = Data.data.copy()
        this_data -= polys[0, si:si + this_nt, ...]
        # If desired, subtract off the linear function of time.
        if subtract_slope:
            #this_data -= (total_slope
            #              * (Data.time[:,None,None,None] - mean_time))
            this_data -= polys[1, si:si + this_nt, ...]
        # Find the first and last unmasked times.
        time_unmask = sp.alltrue(ma.getmaskarray(this_data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        unmasked_ind, = sp.where(time_unmask)
        first_ind = min(unmasked_ind)
        last_ind = max(unmasked_ind)
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1) and
                sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt, rtol=0.001)):
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape:
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
        # Apply an offset to the time in case the start of the Data Block
        # doesn't line up with the time array perfectly.
        offset = (time[sp.argmin(abs(time - Data.time[first_ind]))] -
                  Data.time[first_ind])
        # Generate window function.
        if window:
            window_function = sig.get_window(window, last_ind - first_ind + 1)
        # Use a fresh loop variable so the block index `ii` is not shadowed.
        for kk in range(first_ind, last_ind + 1):
            ind = sp.argmin(abs(time - (Data.time[kk] + offset)))
            if abs(time[ind] - (Data.time[kk])) < 0.5 * dt:
                if sp.any(mask[ind, ...]):
                    msg = "Overlapping times in Data Blocks."
                    raise ce.DataError(msg)
                if window:
                    window_value = window_function[kk - first_ind]
                else:
                    window_value = 1.0
                time_stream[ind, ...] = (window_value *
                                         this_data[kk, ...].filled(0.0))
                mask[ind, ...] = window_value * sp.logical_not(
                    ma.getmaskarray(this_data)[kk, ...])
    if return_means:
        return time_stream, mask, dt, polys[0, 0, ...]
    else:
        return time_stream, mask, dt
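The docstring notes that time_stream and mask are exactly the inputs needed for a windowed power spectrum. One plausible way to use them, with synthetic arrays standing in for the function's outputs; the normalization by sum(mask**2) is an assumed convention, not something taken from this module:

import numpy as np

dt = 0.1
n_time = 1024
time = np.arange(n_time) * dt
# Stand-ins for make_masked_time_stream's outputs: a mean-subtracted,
# zero-padded time stream and the matching mask/window array.
time_stream = np.sin(2 * np.pi * 1.0 * time)
mask = np.ones(n_time)
time_stream[300:340] = 0.    # a masked gap, filled with zeros
mask[300:340] = 0.

# Periodogram, normalized by the effective number of samples so the
# masked gap does not bias the overall power level.
spec = np.abs(np.fft.rfft(time_stream)) ** 2 * dt / np.sum(mask ** 2)
freqs = np.fft.rfftfreq(n_time, dt)
print(freqs[np.argmax(spec)])   # peak near the input 1.0 Hz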