Example #1
def emissive_radiance_old(emissivity, T, wl):
    """Radiance of a surface due to emission"""

    h = 6.62607004e-34  # m2 kg s-1
    c = 299792458  # m s-1
    numerator = 2.0 * h * (c**2)  # m4 kg s-1
    wl_m = wl * 1e-9
    numerator_per_lam5 = numerator * pow(wl_m, -5)  # kg s-1 m-1
    k = 1.380648520e-23  # Boltzmann constant, m2 kg s-2 K-1
    denom = s.exp(h * c / (k * wl_m * T)) - 1.0  # dimensionless
    L = numerator_per_lam5 / denom  # Watts per m3

    cm2_per_m2, nm_per_m, uW_per_W = 10000, 1e9, 1e6
    conversion = cm2_per_m2 * nm_per_m * uW_per_W / s.pi  # -> uW nm-1 cm-2 sr-1
    L = L * conversion

    ddenom_dT = s.exp(h * c / (k * wl_m * T)) * h * c * (-1.0) / (pow(
        k * wl_m * T, 2)) * k * wl_m
    dL_dT = -numerator_per_lam5 / pow(denom, 2.0) * ddenom_dT * conversion

    L = L * emissivity
    dL_dT = dL_dT * emissivity
    L[s.logical_not(s.isfinite(L))] = 0
    dL_dT[s.logical_not(s.isfinite(dL_dT))] = 0
    return L, dL_dT
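A minimal usage sketch for the function above; it assumes `s` is scipy (or numpy) imported under that alias, as the body implies, and the emissivity and temperature values here are arbitrary:

import numpy as s  # the function resolves s.exp, s.pi, s.logical_not, s.isfinite through this alias

wl = s.array([450.0, 550.0, 650.0])        # wavelengths in nm
emissivity = 0.98 * s.ones_like(wl)        # a nearly black surface
L, dL_dT = emissive_radiance_old(emissivity, 300.0, wl)  # T = 300 K
# L is in uW nm-1 cm-2 sr-1 after the conversion above; dL_dT is its derivative with respect to T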
Example #2
def filter_nana(ts_data, ts_ticks):

    if ts_data.ndim > 1:
        raise ValueError("filtering-NaN is only defined for" +
                         "one dimensional time series data.")
    ts_ticks = compress(logical_not(isnan(ts_data)), ts_ticks)
    ts_data = compress(logical_not(isnan(ts_data)), ts_data)

    return ts_data, ts_ticks
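A quick call sketch, assuming `compress`, `logical_not`, and `isnan` are imported at module level (e.g. from numpy), as the function body implies:

from numpy import array, nan, compress, logical_not, isnan

ts_data = array([1.0, nan, 3.0, nan, 5.0])
ts_ticks = array([10, 20, 30, 40, 50])
clean_data, clean_ticks = filter_nana(ts_data, ts_ticks)
# clean_data  -> [1., 3., 5.]
# clean_ticks -> [10, 30, 50]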
Example #3
    def _check_bounds(self, x_new, y_new):
        """Check the inputs for being in the bounds of the interpolated data.

        Args:
            x_new (float array): x values to check against the interpolation bounds.

            y_new (float array): y values to check against the interpolation bounds.

        Returns:
            out_of_bounds (Boolean array): The mask on x_new and y_new of
            values that are NOT out of bounds.
        """
        below_bounds_x = x_new < self._xlim[0]
        above_bounds_x = x_new > self._xlim[1]

        below_bounds_y = y_new < self._ylim[0]
        above_bounds_y = y_new > self._ylim[1]

        # !! Could provide more information about which values are out of bounds
        if self.bounds_error and below_bounds_x.any():
            raise ValueError("A value in x is below the interpolation "
                "range.")
        if self.bounds_error and above_bounds_x.any():
            raise ValueError("A value in x is above the interpolation "
                "range.")
        if self.bounds_error and below_bounds_y.any():
            raise ValueError("A value in y is below the interpolation "
                "range.")
        if self.bounds_error and above_bounds_y.any():
            raise ValueError("A value in y is above the interpolation "
                "range.")

        out_of_bounds = scipy.logical_not(scipy.logical_or(scipy.logical_or(below_bounds_x, above_bounds_x),
                                                           scipy.logical_or(below_bounds_y, above_bounds_y)))
        return out_of_bounds
Example #4
def add_data_2_map(data, ra_inds, dec_inds, map, noise_i=None, weight=1):
    """Add a data masked array to a map.

    This function also adds the weight to the noise matrix for diagonal noise.
    """

    ntime = len(ra_inds)
    shape = sp.shape(map)
    if len(dec_inds) != ntime or len(data[:, 0]) != ntime:
        raise ValueError('Time axis of data, ra_inds and dec_inds must be'
                         ' same length.')
    if noise_i is not None and map.shape != noise_i.shape:
        raise ValueError('Inverse noise array must be the same size as the map'
                         ' or None.')

    for time_ind in range(ntime):
        if (ra_inds[time_ind] >= 0 and ra_inds[time_ind] < shape[0]
                and dec_inds[time_ind] >= 0 and dec_inds[time_ind] < shape[1]):
            # Get unmasked
            unmasked_inds = sp.logical_not(ma.getmaskarray(data[time_ind, :]))
            ind_map = (ra_inds[time_ind], dec_inds[time_ind], unmasked_inds)
            map[ind_map] += (weight * data)[time_ind, unmasked_inds]
            if noise_i is not None:
                if not hasattr(weight, '__iter__'):
                    noise_i[ind_map] += weight
                else:
                    noise_i[ind_map] += weight[unmasked_inds]
Example #5
    def _check_bounds(self, x_new, y_new):
        """Check the inputs for being in the bounds of the interpolated data.

        Args:
            x_new (float array): x values to check against the interpolation bounds.

            y_new (float array): y values to check against the interpolation bounds.

        Returns:
            out_of_bounds (Boolean array): The mask on x_new and y_new of
            values that are NOT out of bounds.
        """
        below_bounds_x = x_new < self._xlim[0]
        above_bounds_x = x_new > self._xlim[1]

        below_bounds_y = y_new < self._ylim[0]
        above_bounds_y = y_new > self._ylim[1]

        # !! Could provide more information about which values are out of bounds
        if self.bounds_error and below_bounds_x.any():
            raise ValueError("A value in x is below the interpolation "
                "range.")
        if self.bounds_error and above_bounds_x.any():
            raise ValueError("A value in x is above the interpolation "
                "range.")
        if self.bounds_error and below_bounds_y.any():
            raise ValueError("A value in y is below the interpolation "
                "range.")
        if self.bounds_error and above_bounds_y.any():
            raise ValueError("A value in y is above the interpolation "
                "range.")

        out_of_bounds = scipy.logical_not(scipy.logical_or(scipy.logical_or(below_bounds_x, above_bounds_x),
                                                           scipy.logical_or(below_bounds_y, above_bounds_y)))
        return out_of_bounds
Example #6
def add_data_2_map(data, ra_inds, dec_inds, map, noise_i=None, weight=1):
    """Add a data masked array to a map.
    
    This function also adds the weight to the noise matrix for diagonal noise.
    """

    ntime = len(ra_inds)
    shape = sp.shape(map)
    if len(dec_inds) != ntime or len(data[:, 0]) != ntime:
        raise ValueError("Time axis of data, ra_inds and dec_inds must be" " same length.")
    if noise_i is not None and map.shape != noise_i.shape:
        raise ValueError("Inverse noise array must be the same size as the map" " or None.")

    for time_ind in range(ntime):
        if (
            ra_inds[time_ind] >= 0
            and ra_inds[time_ind] < shape[0]
            and dec_inds[time_ind] >= 0
            and dec_inds[time_ind] < shape[1]
        ):
            # Get unmasked
            unmasked_inds = sp.logical_not(ma.getmaskarray(data[time_ind, :]))
            ind_map = (ra_inds[time_ind], dec_inds[time_ind], unmasked_inds)
            map[ind_map] += (weight * data)[time_ind, unmasked_inds]
            if noise_i is not None:
                if not hasattr(weight, "__iter__"):
                    noise_i[ind_map] += weight
                else:
                    noise_i[ind_map] += weight[unmasked_inds]
Example #7
def cross(series, cross=0, direction='cross'):
    """
    From http://stackoverflow.com/questions/10475488/calculating-crossing-intercept-points-of-a-series-or-dataframe

    Given a Series returns all the index values where the data values equal 
    the 'cross' value. 

    Direction can be 'rising' (for rising edge), 'falling' (for only falling 
    edge), or 'cross' for both edges
    """
    # Find if values are above or below the crossing value:
    above=series.values > cross
    below=scipy.logical_not(above)
    left_shifted_above = above[1:]
    left_shifted_below = below[1:]
    x_crossings = []
    # Find indexes on left side of crossing point
    if direction == 'rising':
        idxs = (left_shifted_above & below[0:-1]).nonzero()[0]
    elif direction == 'falling':
        idxs = (left_shifted_below & above[0:-1]).nonzero()[0]
    else:
        rising = left_shifted_above & below[0:-1]
        falling = left_shifted_below & above[0:-1]
        idxs = (rising | falling).nonzero()[0]

    # Calculate x crossings with interpolation using formula for a line:
    x1 = series.index.values[idxs]
    x2 = series.index.values[idxs+1]
    y1 = series.values[idxs]
    y2 = series.values[idxs+1]
    x_crossings = (cross-y1)*(x2-x1)/(y2-y1) + x1

    return x_crossings
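A usage sketch with a synthetic sine wave; it assumes pandas and numpy are available and that the installed scipy still re-exports `logical_not`, as the snippets in this collection expect:

import numpy as np
import pandas as pd

t = np.linspace(0, 4 * np.pi, 200)
wave = pd.Series(np.sin(t), index=t)
all_crossings = cross(wave, cross=0, direction='cross')   # every zero crossing (rising and falling)
rising_only = cross(wave, cross=0, direction='rising')    # rising-edge crossings only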
Example #8
def pixel_counts(data, ra_inds, dec_inds, pixel_hits, map_shape=(-1, -1)):
    """Counts the hits on each unique pixel.

    Returns pix_list, a list of tuples, each tuple is a (ra,dec) index on a
    map pixel hit on this scan.  The list only contains unique entries.  The
    array pixel_hits (preallocated for performance), is
    filled with the number of hits on each of these pixels as a function of
    frequency index. Only the entries pixel_hits[:len(pix_list),:]
    are meaningful.
    """

    if ra_inds.shape != dec_inds.shape or ra_inds.ndim != 1:
        raise ValueError('Ra and Dec arrays not properly shaped.')
    if (pixel_hits.shape[-1] != data.shape[-1]
            or pixel_hits.shape[0] < len(ra_inds)):
        raise ValueError('counts not allocated to the right shape.')

    pix_list = []
    for ii in range(len(ra_inds)):
        pix = (ra_inds[ii], dec_inds[ii])
        if ((map_shape[0] > -1 and pix[0] >= map_shape[0])
                or (map_shape[1] > -1 and pix[1] >= map_shape[1]) or pix[0] < 0
                or pix[1] < 0):
            continue
        elif pix not in pix_list:
            pix_list.append(pix)
        unmasked_freqs = sp.logical_not(ma.getmaskarray(data)[ii, :])
        pixel_hits[pix_list.index(pix), unmasked_freqs] += 1

    return pix_list
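A small sketch of how this might be called, assuming `sp` is scipy/numpy and `ma` is numpy.ma as in the snippet; the indices and mask below are made up for illustration:

import numpy as sp
import numpy.ma as ma

# Three time samples, two frequency channels, one masked value.
data = ma.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                mask=[[False, True], [False, False], [False, False]])
ra_inds = sp.array([0, 0, 1])
dec_inds = sp.array([2, 2, 3])
pixel_hits = sp.zeros((len(ra_inds), data.shape[-1]), dtype=int)

pix_list = pixel_counts(data, ra_inds, dec_inds, pixel_hits)
# pix_list -> [(0, 2), (1, 3)]; only pixel_hits[:len(pix_list), :] is meaningful:
# pixel_hits[0] == [2, 1] (channel 1 was masked at the first time sample)
# pixel_hits[1] == [1, 1]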
Example #9
def pixel_counts(data, ra_inds, dec_inds, pixel_hits, map_shape=(-1, -1)):
    """Counts the hits on each unique pixel.

    Returns pix_list, a list of tuples, each tuple is a (ra,dec) index on a 
    map pixel hit on this scan.  The list only contains unique entries.  The
    array pixel_hits (preallocated for performance), is
    filled with the number of hits on each of these pixels as a function of
    frequency index. Only the entries pixel_hits[:len(pix_list), :] 
    are meaningful.
    """

    if ra_inds.shape != dec_inds.shape or ra_inds.ndim != 1:
        raise ValueError("Ra and Dec arrays not properly shaped.")
    if pixel_hits.shape[-1] != data.shape[-1] or pixel_hits.shape[0] < len(ra_inds):
        raise ValueError("counts not allowcated to right shape.")

    pix_list = []
    for ii in range(len(ra_inds)):
        pix = (ra_inds[ii], dec_inds[ii])
        if (
            (map_shape[0] > -1 and pix[0] >= map_shape[0])
            or (map_shape[1] > -1 and pix[1] >= map_shape[1])
            or pix[0] < 0
            or pix[1] < 0
        ):
            continue
        elif pix not in pix_list:
            pix_list.append(pix)
        unmasked_freqs = sp.logical_not(ma.getmaskarray(data)[ii, :])
        pixel_hits[pix_list.index(pix), unmasked_freqs] += 1

    return pix_list
Example #10
    def assess_calibration(self):
        """Assess if PredPol is calibrated by conditioning on predicted intensity
        and checking the correlation between number of crimes and demographics.

        Returns: a 2D array where the first dimension is the number of days in
        the test set and the second dimension is the number of bins for the
        range of predicted intensities, as computed by `sp.histogram_bin_edges`.
        The entry in the ith row and jth column is the Pearson correlation
        coefficient between race and actual number of crimes in the jth bin of
        predicted intensity for the ith day.
        """
        black = self.pred_obj.grid_cells.black
        not_nan = sp.logical_not(sp.isnan(black.values))

        bins = sp.histogram_bin_edges(self.get_predicted_intensities(), bins='auto')
        correlations = sp.empty((len(self.lambda_columns), len(bins)))
        correlations[:] = sp.nan
        for i, (lambda_col, actual_col) in self._iterator():
            idx_bins = sp.digitize(self.results[lambda_col], bins)
            for j in range(len(bins)):
                idx_selected = sp.logical_and(idx_bins == j, not_nan)
                if sp.sum(idx_selected) > 2:
                    actual = self.results.loc[idx_selected, actual_col]
                    demographics = black.loc[idx_selected]
                    correlations[i, j] = sp.stats.pearsonr(actual, demographics)[0]
        return correlations
Example #11
    def plot_mat(self):

        ###
        da = sp.copy(self._mat["DA"])
        if sp.trace(da) != 0.:
            da = utils.getCorrelationMatrix(da)
        w = (self._mat["WE"] > 0.) & (self._mat["NB"] > 10)
        da[sp.logical_not(w)] = sp.nan

        fig = plt.figure()
        ax = fig.add_subplot(111)
        plt.imshow(da, origin="lower", interpolation='nearest')
        cbar = plt.colorbar()
        plt.grid(True)
        cbar.formatter.set_powerlimits((0, 0))
        cbar.update_ticks()
        plt.show()

        ###
        for k in range(5):
            x = sp.arange(sp.diag(self._mat["DA"], k=k).size)
            y = sp.diag(self._mat["DA"], k=k)
            w = (sp.diag(self._mat["WE"], k=k) > 0.) & (sp.diag(
                self._mat["NB"], k=k) > 10.)
            x = x[w]
            y = y[w]
            plt.plot(x, y, linewidth=4, alpha=0.7)
        plt.grid()
        plt.show()

        return
Example #12
    def plot_2d(self, log=False):
        crt = 1. / scipy.constants.degree

        if ((self._we > 0.).sum() == 0):
            print("no data")
            return

        origin = 'lower'
        extent = [
            crt * self._rt_min, crt * self._rt_max, self._rp_min, self._rp_max
        ]
        if (self._correlation == 'o_f' or self._correlation == 'f_f2'):
            origin = 'upper'
            extent = [
                crt * self._rt_min, crt * self._rt_max, self._rp_max,
                self._rp_min
            ]

        yyy = sp.copy(self._da)
        w = (self._we > 0.) & (self._nb > 10.)
        yyy[sp.logical_not(w)] = float('nan')
        if log:
            yyy[w] = sp.log10(sp.absolute(yyy[w]))
        yyy = utils.convert1DTo2D(yyy, self._np, self._nt)

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xticks([
            i for i in sp.arange(crt * self._rt_min, crt * self._rt_max, crt *
                                 self._binSizeT * 10)
        ])
        ax.set_yticks([
            i
            for i in sp.arange(self._rp_min, self._rp_max, self._binSizeP * 10)
        ])

        plt.imshow(yyy,
                   origin=origin,
                   extent=extent,
                   interpolation='nearest',
                   aspect='auto')
        cbar = plt.colorbar()

        if not log:
            cbar.set_label(r'$\xi(\lambda_{1}/\lambda_{2},\theta)$', size=40)
        else:
            cbar.set_label(
                r'$ \log10 \, |\xi(\lambda_{1}/\lambda_{2},\theta)| $',
                size=40)

        plt.xlabel(r'$\theta \, [\mathrm{deg}]$', fontsize=40)
        plt.ylabel(r'$\lambda_{1}/\lambda_{2}$', fontsize=40)
        plt.grid(True)
        cbar.formatter.set_powerlimits((0, 0))
        cbar.update_ticks()

        plt.show()

        return
Example #13
def readSRI_h5(fn,params,timelims = None):
    '''This will read the SRI formatted h5 files for RISR and PFISR.'''
    assert isinstance(params,(tuple,list))
    h5fn = Path(fn).expanduser()
    coordnames = 'Spherical'

        # Set up the dictionary to find the data
    pathdict = {'Ne':('/FittedParams/Ne', None),
                'dNe':('/FittedParams/Ne',None),
                'Vi':('/FittedParams/Fits',   (0,3)),
                'dVi':('/FittedParams/Errors',(0,3)),
                'Ti':('/FittedParams/Fits',   (0,1)),
                'dTi':('/FittedParams/Errors',(0,1)),
                'Te':('/FittedParams/Fits',  (-1,1)),
                'dTe':('/FittedParams/Errors',(-1,1))}

    with h5py.File(str(h5fn),'r',libver='latest') as f:
        # Get the times and time lims
        times = f['/Time/UnixTime'].value
        # get the sensor location
        sensorloc = np.array([f['/Site/Latitude'].value,
                              f['/Site/Longitude'].value,
                              f['/Site/Altitude'].value])
        # Get the locations of the data points
        rng = f['/FittedParams/Range'].value / 1e3
        angles = f['/BeamCodes'][:,1:3]

    nt = times.shape[0]
    if timelims is not None:
        times = times[(times[:,0]>= timelims[0]) & (times[:,1]<timelims[1]) ,:]
        nt = times.shape[0]
# allaz, allel corresponds to rng.ravel()
    allaz = np.tile(angles[:,0],rng.shape[1])
    allel = np.tile(angles[:,1],rng.shape[1])

    dataloc =np.vstack((rng.ravel(),allaz,allel)).T
    # Read in the data
    data = {}
    with h5py.File(str(h5fn),'r',libver='latest') as f:
        for istr in params:
            if istr not in pathdict.keys(): #list() NOT needed
                logging.error('{} is not a valid parameter name.'.format(istr))
                continue
            curpath = pathdict[istr][0]
            curint = pathdict[istr][-1]

            if curint is None: #3-D data
                tempdata = f[curpath]
            else: #5-D data -> 3-D data
                tempdata = f[curpath][:,:,:,curint[0],curint[1]]
            data[istr] = np.array([tempdata[iT,:,:].ravel() for iT in range(nt)]).T

    # remove nans from SRI file
    nanlog = sp.any(sp.isnan(dataloc),1)
    keeplog = sp.logical_not(nanlog)
    dataloc = dataloc[keeplog]
    for ikey in data.keys():
        data[ikey]= data[ikey][keeplog]
    return (data,coordnames,dataloc,sensorloc,times)
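A call sketch; the file path below is hypothetical and stands in for any RISR/PFISR fitted-parameter file in the SRI HDF5 layout, with h5py, numpy (np), scipy (sp), pathlib.Path, and logging assumed to be imported as the function expects:

fn = '/data/risr/20161021.004_lp_1min-fitcal.h5'   # hypothetical path
params = ('Ne', 'Ti', 'Vi')
data, coordnames, dataloc, sensorloc, times = readSRI_h5(fn, params)
# data['Ne'] has shape (n_locations, n_times); each dataloc row is (range [km], az, el)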
Example #14
def readSRI_h5(fn,params,timelims = None):
    '''This will read the SRI formatted h5 files for RISR and PFISR.'''
    assert isinstance(params,(tuple,list))
    h5fn = Path(fn).expanduser()
    coordnames = 'Spherical'

        # Set up the dictionary to find the data
    pathdict = {'Ne':('/FittedParams/Ne', None),
                'dNe':('/FittedParams/Ne',None),
                'Vi':('/FittedParams/Fits',   (0,3)),
                'dVi':('/FittedParams/Errors',(0,3)),
                'Ti':('/FittedParams/Fits',   (0,1)),
                'dTi':('/FittedParams/Errors',(0,1)),
                'Te':('/FittedParams/Fits',  (-1,1)),
                'dTe':('/FittedParams/Errors',(-1,1))}

    with h5py.File(str(h5fn),'r',libver='latest') as f:
        # Get the times and time lims
        times = f['/Time/UnixTime'].value
        # get the sensor location
        sensorloc = np.array([f['/Site/Latitude'].value,
                              f['/Site/Longitude'].value,
                              f['/Site/Altitude'].value])
        # Get the locations of the data points
        rng = f['/FittedParams/Range'].value / 1e3
        angles = f['/BeamCodes'][:,1:3]

    nt = times.shape[0]
    if timelims is not None:
        times = times[(times[:,0]>= timelims[0]) & (times[:,1]<timelims[1]) ,:]
        nt = times.shape[0]
# allaz, allel corresponds to rng.ravel()
    allaz = np.tile(angles[:,0],rng.shape[1])
    allel = np.tile(angles[:,1],rng.shape[1])

    dataloc =np.vstack((rng.ravel(),allaz,allel)).T
    # Read in the data
    data = {}
    with h5py.File(str(h5fn),'r',libver='latest') as f:
        for istr in params:
            if istr not in pathdict.keys(): #list() NOT needed
                logging.error('{} is not a valid parameter name.'.format(istr))
                continue
            curpath = pathdict[istr][0]
            curint = pathdict[istr][-1]

            if curint is None: #3-D data
                tempdata = f[curpath]
            else: #5-D data -> 3-D data
                tempdata = f[curpath][:,:,:,curint[0],curint[1]]
            data[istr] = np.array([tempdata[iT,:,:].ravel() for iT in range(nt)]).T

    # remove nans from SRI file
    nanlog = sp.any(sp.isnan(dataloc),1)
    keeplog = sp.logical_not(nanlog)
    dataloc = dataloc[keeplog]
    for ikey in data.keys():
        data[ikey]= data[ikey][keeplog]
    return (data,coordnames,dataloc,sensorloc,times)
Example #15
def rebin(Data, n_bins_combined) :
    """The function that acctually does the rebinning on a Data Block."""
    
    nt = Data.data.shape[0]
    new_nt = nt // n_bins_combined
    new_shape = (new_nt,) + Data.data.shape[1:]
    unmask = sp.logical_not(ma.getmaskarray(Data.data))
    data = Data.data.filled(0)
    # Allocate memory for the rebinned data.
    new_data = ma.zeros(new_shape, dtype=data.dtype)
    counts = sp.zeros(new_shape, dtype=int)
    # Add up the bins to be combined.
    for ii in range(n_bins_combined):
        new_data += data[ii:new_nt * n_bins_combined:n_bins_combined,...]
        counts += unmask[ii:new_nt * n_bins_combined:n_bins_combined,...]
    new_data[counts == 0] = ma.masked
    counts[counts == 0] = 1
    new_data /= counts
    Data.set_data(new_data)
    # Now deal with all the other records that aren't the main data.
    for field_name in Data.field.iterkeys():
        # DATE-OBS is a string field so we have to write special code for it.
        if field_name == "DATE-OBS":
            time_field = Data.field[field_name]
            new_field = sp.empty(new_nt, dtype=Data.field[field_name].dtype)
            # Convert to float, average, then convert back to a string.
            time_float = utils.time2float(time_field)
            for ii in range(new_nt):
                tmp_time = sp.mean(time_float[n_bins_combined * ii
                                              : n_bins_combined * (ii + 1)])
                new_field[ii] = utils.float2time(tmp_time)
            Data.set_field(field_name, new_field, 
                       axis_names=Data.field_axes[field_name],
                       format=Data.field_formats[field_name])
            continue
        # Only change fields that have a 'time' axis.
        try:
            time_axis = list(Data.field_axes[field_name]).index('time')
        except ValueError:
            continue
        # For now, the time axis has to be the first axis.
        if time_axis != 0:
            msg = "Expected time to be the first axis for all fields."
            raise NotImplementedError(msg)
        field_data = Data.field[field_name]
        if not field_data.dtype.name == "float64":
            msg = "Field data type is not float. Handle explicitly."
            raise NotImplementedError(msg)
        new_field = sp.empty(field_data.shape[:time_axis] + (new_nt,) 
                             + field_data.shape[time_axis + 1:],
                             dtype=field_data.dtype)
        for ii in range(new_nt):
            tmp_data = sp.sum(field_data[n_bins_combined * ii
                                         :n_bins_combined * (ii + 1),...], 0)
            tmp_data /= n_bins_combined
            new_field[ii,...] = tmp_data
        Data.set_field(field_name, new_field, 
                       axis_names=Data.field_axes[field_name],
                       format=Data.field_formats[field_name])
Example #16
    def _p(self, s):
        """Transition probability function

        Parameters
        -----------
        s : int
            Integer encoding the state (the set of revolting provinces)

        Returns
        -----------
        sprime : list
           Each element is a tuple with the transition state
           and the probability of transitioning to that state.
           
        """

        # I enumerate states such that
        # 0 = [0, ...., 0]
        # 1 = [1, 0, 0, 0, ... 0 ]
        # 2 = [0, 1, 0, 0, ... 0 ]
        # 2^k = [0, 0, ..., 0, 1]
        # 2^(k+1) - 1 = [1, 1, ..., 1, 1]
        sbinary = int2binary(s, width=self.k)[::-1]
        # indices of Revolting provinces
        R = sbinary.nonzero()[0].tolist()

        # indices of Non-revolting provinces
        C = sp.logical_not(sbinary).nonzero()[0]
        P = [1]
        newstate = [s]
        for i, si in enumerate(sbinary):
            # Distances to the nearest other non-revolting (dminc) and revolting (dminr) provinces
            Ci = list(set(C) - set([i]))
            if Ci:
                dminc = self.D[sp.ix_([i], Ci)].min()
            else:
                dminc = self.dmax

            Ri = list(set(R) - set([i]))
            if Ri: 
                dminr = sp.c_[self.D[sp.ix_([i], Ri)]].min()
            else:
                dminr = self.dmax
            # if i in revolt
            if si:
                Si = s - 2**i
                Pi = dminr / dminc
            # if i not in revolt
            else:
                Si = s + 2**i
                Pi = dminc / dminr
            P.append(Pi)
            newstate.append(Si)
        newstate = sp.array(newstate)
        P = sp.array(P)
        P /= P.sum()
        return (P.tolist(), newstate.tolist())
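The helper `int2binary` is not shown in this snippet; a minimal sketch consistent with the state enumeration in the comments (the caller reverses the result with `[::-1]` so that index i marks province i) could look like this, assuming `sp` is scipy/numpy:

import numpy as sp

def int2binary(s, width):
    # Hypothetical helper: integer -> array of binary digits of the given width,
    # most significant bit first (the caller flips it so index i is province i).
    return sp.array([(s >> (width - 1 - i)) & 1 for i in range(width)])

# int2binary(5, width=4)        -> [0, 1, 0, 1]
# int2binary(5, width=4)[::-1]  -> [1, 0, 1, 0]   (provinces 0 and 2 in revolt)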
Example #17
def goodGIW(time, shot, name="c_W"):
    """extract values which are only valid, otherwise place in zeros"""
    temp = GIWData(shot, data=name)
    interp = scipy.interpolate.interp1d(temp.time,
                                        temp.data,
                                        bounds_error=False)
    output = interp(time)
    output[scipy.logical_not(scipy.isfinite(output))] = 0.  # force all bad values to zero
    return output
Example #18
    def get_time_trapped(self, trap_num=None, straight_shots=False):
        # adjusted this function to isolate flies that went straight to traps
        mask_trapped = self.mode == self.Mode_Trapped
        if straight_shots:
            mask_trapped = mask_trapped & scipy.logical_not(self.ever_tracked)
        if trap_num is None:
            return self.t_in_trap[mask_trapped]
        else:
            mask_trapped_in_num = mask_trapped & (self.trap_num == trap_num)
            return self.t_in_trap[mask_trapped_in_num]
Example #19
    def msd_of_linker_end_points(self, fregions, flabels, rregions, rlabels):
        """
		takes definitions of regions and labels for forward and reverse
		strands assuming each region in the forward direction coorosponds to a region
		in the reverse direction and returns the mean squaared distance
		of the start and end points of each region. 
		"""
        if len(fregions) != len(rregions):
            return (float('Inf'))
        flinks = fregions[scipy.logical_not(flabels)]
        rlinks = rregions[scipy.logical_not(rlabels)]
        s = sum([
            self._d[t1, t2] for t1, t2 in zip(flinks[:, 0], rlinks[:, 1] - 1)
            if self._d[t1, t2]
        ]) / len(flinks)
        f = sum([
            self._d[t1, t2] for t1, t2 in zip(flinks[:, 1] - 1, rlinks[:, 0])
            if self._d[t1, t2]
        ]) / len(flinks)
        return ((s + f) / 2)
Example #20
    def calcaverage_sigmacutloop(self,data,mask=None,noise=None,Nsigma=3.0,Nitmax=10,fixmean=None,verbose=0,saveused=False,median_firstiteration=True):
        """
        mask must have the same dimensions as data. If mask[x]=True, then data[x] is not used.
        noise must have the same dimensions as data. If noise is not None, then the error-weighted mean is calculated.
        if saveused, then self.use contains the array of datapoints used, and self.clipped the array of datapoints clipped
        median_firstiteration: in the first iteration, use the median instead of the mean. This is more robust if there is a population of bad measurements
        """

        self.reset()
        #self.i=0
        #self.converged=False
        while ((self.i<Nitmax) or (Nitmax==0)) and (not self.converged):
            medianflag = median_firstiteration and (self.i==0) and (Nsigma!=None)
            if noise is None:
                self.calcaverage_sigmacut(data,mask=mask,Nsigma=Nsigma,fixmean=fixmean,medianflag=medianflag,verbose=verbose)
            else:
                self.calcaverage_errorcut(data,mask=mask,noise=noise,Nsigma=Nsigma,medianflag=medianflag,verbose=verbose)
            #if verbose>=2:
             #   print(self.__str__())
            # Not converged???
            if self.stdev==None or self.stdev==0.0 or self.mean==None:
                self.converged=False
                break
            # Only do a sigma cut if wanted
            if Nsigma == None or Nsigma == 0.0:
                self.converged=True
                break
            # No changes anymore? If yes converged!!!
            if (self.i>0) and (self.Nchanged==0):
                self.converged=True
                break
            self.i+=1

        if saveused:
            if mask is None:
                self.clipped = scipy.logical_not(self.use)
            else:
                self.clipped = scipy.logical_not(self.use) &  scipy.logical_not(mask)
        else:
            del(self.use)
        return(0)
Example #21
def velocity_dof(domain, ax):
    # Calculate velocity dof numbers for each cell
    rm = roll( domain, 1, axis=ax )
    type_3 = logical_and( domain, rm )
    type_2 = logical_or(  domain, rm )
    
    dof = cumsum( logical_not( logical_or( type_3, type_2 ) ) ).reshape( domain.shape ) - 1
    # Do logic to figure out type 2 and 3
    dof[type_2 == 1] = -2
    dof[type_3 == 1] = -3

    return dof.astype(int64)
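A tiny 1-D sketch of the numbering this produces, assuming the numpy names used above (`roll`, `logical_and`, `logical_or`, `logical_not`, `cumsum`, `int64`) are imported at module level; the mask convention (1 = blocked cell) is only an assumption for illustration:

from numpy import array, roll, logical_and, logical_or, logical_not, cumsum, int64

domain = array([1, 1, 0, 0, 0, 1])   # assumed: 1 marks a blocked cell, periodic along axis 0
dof = velocity_dof(domain, ax=0)
# dof -> [-3, -3, -2, 0, 1, -2]
# faces between two free cells get running dof numbers; -2 / -3 mark type 2 / type 3 faces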
Example #22
def calcArealDens(l, te, halfte, rho, tped, tcore, nped, ncore):

    minrho = scipy.argmin(rho)
    spline1 = scipy.interpolate.interp1d(rho[:minrho + 1],
                                         l[:minrho + 1],
                                         bounds_error=False,
                                         kind='linear')

    spline2 = scipy.interpolate.interp1d(rho[minrho:],
                                         l[minrho:],
                                         bounds_error=False,
                                         kind='linear')

    # step 3, find te rho locations
    ne = GIWprofiles.ne(GIWprofiles.te2rho2(te, tcore, tped), ncore, nped)

    rhohalfte = GIWprofiles.te2rho2(halfte, tcore, tped)

    bounds = scipy.array([rho[minrho], 1.])
    boundte = GIWprofiles.te(bounds, tcore, tped)
    bndidx = scipy.searchsorted(
        halfte, boundte)  #add proper endpoints to the temperature array
    rhohalfte = scipy.insert(rhohalfte, bndidx,
                             bounds)  #assumes that Te is positively increasing

    #step 4, find l location for those 1/2 te locations AND rho=1 for endpoints
    l1 = spline1(rhohalfte)
    deltal1 = abs(l1[:-1] - l1[1:])
    deltal1[scipy.logical_not(scipy.isfinite(deltal1))] = 0.

    l2 = spline2(rhohalfte)
    deltal2 = abs(l2[:-1] - l2[1:])
    deltal2[scipy.logical_not(scipy.isfinite(deltal2))] = 0.

    #plt.semilogx(te,ne*(deltal1*deltal2)/1e19, '.')
    #plt.xlabel('deltal2')
    #plt.show()

    return pow(ne, 2) * (deltal1 + deltal2)
Example #23
def weights2(inp, te, shot, time):
    """ pull out data vs time necessary to calculate the weights"""
    #condition initialized values

    output = scipy.zeros((len(time), 2))
    r = inp[0]
    z = inp[1]
    l = inp[2]

    #load GIW ne, Te data

    tped = goodGIW(time, shot, name="t_e_ped")
    tcore = goodGIW(time, shot, name="t_e_core")

    nped = goodGIW(time, shot, name="n_e_ped")
    ncore = goodGIW(time, shot, name="n_e_core")

    good = scipy.arange(len(time))[scipy.logical_and(
        ncore != 0, tcore != 0)]  #take only good data

    #use spline of the GIW data to solve for the proper Te, otherwise don't evaluate

    logte = scipy.log(te)
    halfte = scipy.exp(logte[1:] / 2. +
                       logte[:-1] / 2.)  #not going to worry about endpoints
    # because I trust te is big enough to be larger than the range of the profile

    #step 1, use array of r,z values to solve for rho

    eq = eqtools.AUGDDData(shot)
    rho = eq.rz2rho('psinorm', r, z, time[good], each_t=True,
                    sqrt=True)  #solve at each time
    rhomin = scipy.nanmin(rho, axis=1)
    temax = GIWprofiles.te(rhomin, tcore[good], tped[good])

    idx = 0

    #step 2, construct 2 splines of l(rho)
    for i in good:
        temp = calcArealDens(l, te, halfte, rho[idx], tped[i], tcore[i],
                             nped[i], ncore[i])
        temp[scipy.logical_not(scipy.isfinite(temp))] = 0.
        temp = scipy.sum(temp)
        output[i, 0] = temp
        output[i, 1] = temax[idx]
        idx += 1
        #print(idx),

    #step 8, return values for storage
    return output
Example #24
    def flush_buffers(self):
        """Write to file, and refresh the memory map object"""
        if self.format == 'ENVI':
            if self.write:
                for row, frame in self.frames.items():
                    valid = s.logical_not(s.isnan(frame[:, 0]))
                    if self.file.metadata['interleave'] == 'bil':
                        self.memmap[row, :, valid] = frame[valid, :].T
                    else:
                        self.memmap[row, valid, :] = frame[valid, :]
            self.frames = OrderedDict()
            del self.file
            self.file = envi.open(self.fname + '.hdr', self.fname)
            self.open_map_with_retries()
Example #25
    def plot_2d(self, x_power=0):

        if ((self._we > 0.).sum() == 0):
            print("no data")
            return

        origin = 'lower'
        extent = [self._rt_min, self._rt_max, self._rp_min, self._rp_max]
        if (self._correlation == 'o_f' or self._correlation == 'f_f2'):
            origin = 'upper'
            extent = [self._rt_min, self._rt_max, self._rp_max, self._rp_min]

        yyy = sp.copy(self._da)
        w = (self._we > 0.) & (self._nb > 10.)
        yyy[sp.logical_not(w)] = float('nan')
        xxx = utils.convert1DTo2D(self._r, self._np, self._nt)
        yyy = utils.convert1DTo2D(yyy, self._np, self._nt)

        coef = sp.power(xxx, x_power)

        fig = plt.figure()
        ax = fig.add_subplot(111)
        #ax.set_xticks([ i for i in sp.arange(self._minX2D-50., self._maxX2D+50., 50.) ])
        #ax.set_yticks([ i for i in sp.arange(self._minY2D-50., self._maxY2D+50., 50.) ])

        plt.imshow(coef * yyy,
                   origin=origin,
                   extent=extent,
                   interpolation='nearest')
        cbar = plt.colorbar()

        if (x_power == 0):
            cbar.set_label(r'$\xi(\, r_{\parallel},r_{\perp} \,)$', size=40)
        if (x_power == 1):
            cbar.set_label(r'$|r|.\xi(\, r_{\parallel},r_{\perp} \,)$',
                           size=40)
        if (x_power == 2):
            cbar.set_label(r'$|r|^{2}.\xi(\, r_{\parallel},r_{\perp} \,)$',
                           size=40)

        plt.xlabel(r'$r_{\perp} \, [h^{-1} \, \rm{Mpc}]$', fontsize=40)
        plt.ylabel(r'$r_{\parallel} \, [h^{-1} \, \rm{Mpc}]$', fontsize=40)
        plt.grid(True)
        cbar.formatter.set_powerlimits((0, 0))
        cbar.update_ticks()

        plt.show()

        return
Example #26
    def compare_attributes(atts1, atts2):
        self.assertEqual(len(atts1.keys()), len(atts2.keys()), "{}".format(nameRun))
        self.assertListEqual(sorted(atts1.keys()), sorted(atts2.keys()), "{}".format(nameRun))
        for item in atts1:
            nequal = True
            if isinstance(atts1[item], numpy.ndarray):
                nequal = sp.logical_not(sp.array_equal(atts1[item], atts2[item]))
            else:
                nequal = atts1[item] != atts2[item]
            if nequal:
                print("WARNING: {}: not exactly equal, using allclose for {}".format(nameRun, item))
                print(atts1[item], atts2[item])
                allclose = sp.allclose(atts1[item], atts2[item])
                self.assertTrue(allclose, "{}".format(nameRun))
        return
Example #27
def masked_subtract_mean(data, mask, axis):
    """Subtracts the mean from data along axis given a mask."""

    # Slice for broadcasting to the original shape.
    up_broad = [slice(None)] * data.ndim
    up_broad[axis] = None
    up_broad = tuple(up_broad)
    # Calculate the mean.
    un_mask = sp.logical_not(mask)
    counts = sp.sum(un_mask, axis)
    counts[counts == 0] = 1
    mean = sp.sum(data * un_mask, axis) / counts
    # Subtract the mean.
    data = data - mean[up_broad] * un_mask
    return data
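A small sketch of calling it, assuming `sp` is scipy/numpy:

import numpy as sp

data = sp.array([[1.0, 2.0],
                 [3.0, 4.0],
                 [5.0, 6.0]])
mask = sp.array([[False, False],
                 [False, True],      # (1, 1) is masked and excluded from the column mean
                 [False, False]])
out = masked_subtract_mean(data, mask, axis=0)
# column 0 mean = 3.0 (all rows); column 1 mean = 4.0 (rows 0 and 2 only)
# out -> [[-2., -2.], [0., 4.], [2., 2.]]  (masked entries keep their original value)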
Example #28
def _van_rossum_multiunit_dist_for_trial_pair(a, b, weighting, tau, kernel):
    if kernel is None:
        spike_counts = sp.atleast_2d([st.size for st in a + b])
        k_dist = spike_counts.T * (spike_counts - spike_counts.T)
    else:
        k_dist = kernel.summed_dist_matrix(a + b)

    non_diagonal = sp.logical_not(sp.eye(len(a)))
    summed_population = (sp.trace(k_dist) - sp.trace(k_dist, len(a)) -
                         sp.trace(k_dist, -len(a)))
    labeled_line = (sp.sum(k_dist[:len(a), :len(a)][non_diagonal]) +
                    sp.sum(k_dist[len(a):, len(a):][non_diagonal]) -
                    sp.sum(k_dist[:len(a), len(a):][non_diagonal]) -
                    sp.sum(k_dist[len(a):, :len(a)][non_diagonal]))
    return sp.sqrt(summed_population + weighting * labeled_line)
Example #29
def masked_subtract_mean(data, mask, axis):
    """Subtracts the mean from data along axis given a mask."""

    # Slice for broadcasting to the original shape.
    up_broad = [slice(None)] * data.ndim
    up_broad[axis] = None
    up_broad = tuple(up_broad)
    # Calculate the mean.
    un_mask = sp.logical_not(mask)
    counts = sp.sum(un_mask, axis)
    counts[counts == 0] = 1
    mean = sp.sum(data * un_mask, axis) / counts
    # Subtract the mean.
    data = data - mean[up_broad] * un_mask
    return data
Example #30
def nothing(noth):
    # If requested, remove the time gradient from all channels.
    if remove_slope:
        un_mask = sp.logical_not(ma.getmaskarray(NoiseData.data))
        NoiseData.calc_time()
        time = NoiseData.time
        n_time = len(time)
        # Test if the mask is the same for all slices.  If it is, that greatly
        # reduces the work as we only have to generate one set of polynomials.
        all_masks_same = True
        for jj in range(n_time):
            if sp.all(un_mask[jj,...] == un_mask[jj,0,0,0]):
                continue
            else:
                all_masks_same = False
                break
        if all_masks_same:
            polys = misc.ortho_poly(time, 2, un_mask[:,0,0,0], 0)
            polys.shape = (2, len(time), 1, 1, 1)
        else:
            polys = misc.ortho_poly(time[:,None,None,None], 2, un_mask, 0)
        # Subtract the slope mode (1st mode) out of the NoiseData.
        slope_amps = sp.sum(polys[1,...] * un_mask * NoiseData.data.filled(0),
                            0)
        NoiseData.data -= polys[1,...] * slope_amps
    # Iteratively flag on sliding scale to get closer and closer to desired
    # threshold.
    n_time = Data.data.shape[0]
    max_thres = sp.sqrt(n_time)/2.
    n_iter = 3
    thresholds = (max_thres ** (n_iter - 1 - sp.arange(n_iter))
                 * thres ** sp.arange(n_iter)) ** (1./(n_iter - 1))
    for threshold in thresholds:
        # Get the deviation from the mean.
        residuals = ma.anom(NoiseData.data, 0).filled(0)
        # Get indices above the threshold.
        mask = abs(residuals) > threshold * ma.std(NoiseData.data, 0)
        # Mask the data.
        Data.data[mask] = ma.masked
        NoiseData.data[mask] = ma.masked
    
    # Now flag for very noisy channels.
    if max_noise_factor > 0:
        vars = ma.var(NoiseData.data, 0)
        mean_vars = ma.mean(vars, -1).filled(0)
        bad_chans = vars.filled(0) > max_noise_factor * mean_vars[:,:,None]
        Data.data[:,bad_chans] = ma.masked
        NoiseData.data[:,bad_chans] = ma.masked
Example #31
def nothing(noth):
    # If requested, remove the time gradient from all channels.
    if remove_slope:
        un_mask = sp.logical_not(ma.getmaskarray(NoiseData.data))
        NoiseData.calc_time()
        time = NoiseData.time
        n_time = len(time)
        # Test if the mask is the same for all slices.  If it is, that greatly
        # reduces the work as we only have to generate one set of polynomials.
        all_masks_same = True
        for jj in range(n_time):
            if sp.all(un_mask[jj, ...] == un_mask[jj, 0, 0, 0]):
                continue
            else:
                all_masks_same = False
                break
        if all_masks_same:
            polys = misc.ortho_poly(time, 2, un_mask[:, 0, 0, 0], 0)
            polys.shape = (2, len(time), 1, 1, 1)
        else:
            polys = misc.ortho_poly(time[:, None, None, None], 2, un_mask, 0)
        # Subtract the slope mode (1st mode) out of the NoiseData.
        slope_amps = sp.sum(polys[1, ...] * un_mask * NoiseData.data.filled(0),
                            0)
        NoiseData.data -= polys[1, ...] * slope_amps
    # Iteratively flag on sliding scale to get closer and closer to desired
    # threshold.
    n_time = Data.data.shape[0]
    max_thres = sp.sqrt(n_time) / 2.
    n_iter = 3
    thresholds = (max_thres**(n_iter - 1 - sp.arange(n_iter)) *
                  thres**sp.arange(n_iter))**(1. / (n_iter - 1))
    for threshold in thresholds:
        # Get the deviation from the mean.
        residuals = ma.anom(NoiseData.data, 0).filled(0)
        # Get indices above the threshold.
        mask = abs(residuals) > threshold * ma.std(NoiseData.data, 0)
        # Mask the data.
        Data.data[mask] = ma.masked
        NoiseData.data[mask] = ma.masked

    # Now flag for very noisy channels.
    if max_noise_factor > 0:
        vars = ma.var(NoiseData.data, 0)
        mean_vars = ma.mean(vars, -1).filled(0)
        bad_chans = vars.filled(0) > max_noise_factor * mean_vars[:, :, None]
        Data.data[:, bad_chans] = ma.masked
        NoiseData.data[:, bad_chans] = ma.masked
Example #32
    def test_off_map(self):
        Data = self.blocks[0]
        Data.calc_freq()
        map = self.map
        map[:,:,:] = 0.0
        Data.data[:,:,:,:] = 0.0
        # Rig the pointing but put one off the map.
        def rigged_pointing():
            Data.ra = map.get_axis('ra')[range(10)]
            Data.dec = map.get_axis('dec')[range(10)]
            Data.ra[3] = Data.ra[3] - 8.0
        Data.calc_pointing = rigged_pointing
        smd.sub_map(Data, map)
        self.assertTrue(sp.alltrue(ma.getmaskarray(Data.data[3,:,:,:])))
        self.assertTrue(sp.alltrue(sp.logical_not(
            ma.getmaskarray(Data.data[[0,1,2,4,5,6,7,8,9],:,:,:]))))
Example #33
def _van_rossum_multiunit_dist_for_trial_pair(a, b, weighting, tau, kernel):
    if kernel is None:
        spike_counts = sp.atleast_2d([st.size for st in a + b])
        k_dist = spike_counts.T * (spike_counts - spike_counts.T)
    else:
        k_dist = kernel.summed_dist_matrix(a + b)

    non_diagonal = sp.logical_not(sp.eye(len(a)))
    summed_population = (
        sp.trace(k_dist) - sp.trace(k_dist, len(a)) - sp.trace(k_dist, -len(a)))
    labeled_line = (
        sp.sum(k_dist[:len(a), :len(a)][non_diagonal]) +
        sp.sum(k_dist[len(a):, len(a):][non_diagonal]) -
        sp.sum(k_dist[:len(a), len(a):][non_diagonal]) -
        sp.sum(k_dist[len(a):, :len(a)][non_diagonal]))
    return sp.sqrt(summed_population + weighting * labeled_line)
Example #34
    def load_rt(self, fn):
        """Load the results of a LibRadTran run."""

        wl, rdn0,   irr = s.loadtxt(self.lut_dir+'/LUT_'+fn+'_alb0.out').T
        wl, rdn025, irr = s.loadtxt(self.lut_dir+'/LUT_'+fn+'_alb025.out').T
        wl, rdn05,  irr = s.loadtxt(self.lut_dir+'/LUT_'+fn+'_alb05.out').T

        # Replace a few zeros in the irradiance spectrum via interpolation
        good = irr > 1e-15
        bad = s.logical_not(good)
        irr[bad] = interp1d(wl[good], irr[good])(wl[bad])

        # Translate to Top of Atmosphere (TOA) reflectance
        rhoatm = rdn0 / 10.0 / irr * s.pi  # Translate to uW nm-1 cm-2 sr-1
        rho025 = rdn025 / 10.0 / irr * s.pi
        rho05 = rdn05 / 10.0 / irr * s.pi

        # Resample TOA reflectances to simulate the instrument observation
        rhoatm = resample_spectrum(rhoatm, wl, self.wl, self.fwhm)
        rho025 = resample_spectrum(rho025, wl, self.wl, self.fwhm)
        rho05 = resample_spectrum(rho05,  wl, self.wl, self.fwhm)
        irr = resample_spectrum(irr,    wl, self.wl, self.fwhm)

        # Calculate some atmospheric optical constants
        sphalb = 2.8*(2.0*rho025-rhoatm-rho05)/(rho025-rho05)
        transm = (rho05-rhoatm)*(2.0-sphalb)

        # For now, don't estimate this term!!
        # TODO: Have LibRadTran calculate it directly
        transup = s.zeros(self.wl.shape)

        # Get solar zenith, translate to irradiance at zenith = 0
        with open(self.lut_dir+'/LUT_'+fn+'.zen', 'r') as fin:
            output = fin.read().split()
            solzen, solaz = [float(q) for q in output[1:]]

        self.coszen = s.cos(solzen/360.0*2.0*s.pi)
        irr = irr / self.coszen
        self.solar_irr = irr.copy()

        results = {"wl": self.wl, 'solzen': solzen, 'irr': irr,
                   "solzen": solzen, "rhoatm": rhoatm, "transm": transm,
                   "sphalb": sphalb, "transup": transup}
        return results
Example #35
def change_times2state_vec(change_times,total_time,initial_state='low'):
    """Converts change times to a state vector. A state vector is a np.array of
    bool or int type, 1 or True denotes 'high' and 0 or False denotes 'low'. Each
    index corresponds to one step of the RIO timebase (which is 1 ms at the 
    moment)

    Parameters
    ----------
    change_times : np.array

    total_time: int
        the total length (in timebase steps) of the state vector
    
    initial_state: 'low' or 'high'
        denotes whether the first state is low or high. 
    
    Returns
    -------
    state_vec: np.array
        the state vector
    """
    
    state_vec = sp.zeros(total_time)
    
    # cast if not array
    if type(change_times) != sp.ndarray:
        change_times = sp.array(change_times)
    
    # deconstruct: change times come in on/off pairs; if the count is odd, append the end time
    if len(change_times) % 2 == 1:
        # append the last timepoint if last pulse doesn't end
        change_times = sp.concatenate((change_times,[total_time]))
        
    # deconstruct into pulses
    for i in range(0,len(change_times),2):
        state_vec[change_times[i]+1:change_times[i+1]+1] = 1
    
    state_vec = state_vec.astype('bool')
    if initial_state == 'high':
        state_vec = sp.logical_not(state_vec)
        
    return state_vec
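A short usage sketch, assuming `sp` is scipy/numpy as in the snippet:

import numpy as sp

# Goes high after t=2, back low after t=5, high again after t=7; the last pulse
# runs to the end because the list has an odd number of change times.
state_vec = change_times2state_vec([2, 5, 7], total_time=10)
# state_vec -> [F F F T T T F F T T]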
Example #36
def read(file_name, map_inds=None, feedback=2):
    """Read a map from an image fits file.
    """

    fname_abbr = ku.abbreviate_file_path(file_name)
    if feedback > 0:
        print 'Opening file: ' + fname_abbr
    # Open the fits file.
    hdulist = pyfits.open(file_name)
    history = bf.get_history_header(hdulist[0].header)
    history.add('Read from file.', 'File name: ' + fname_abbr)
    map_list = []

    if map_inds is None:
        map_inds = range(1, len(hdulist))
    elif not hasattr(map_inds, '__iter__'):
        map_inds = (map_inds, )
    elif len(map_inds) == 0:
        map_inds = range(1, len(hdulist))

    for ii in map_inds:
        data = hdulist[ii].data

        # Set the data attribute
        Map = data_map.DataMap()
        Map.set_data(sp.swapaxes(data, 0, 2))
        # Masked data is stored in FITS files as float('nan')
        Map.data[sp.logical_not(sp.isfinite(Map.data))] = ma.masked
        Map.history = history

        # Set the other fields.
        for field_name in fields:
            if not field_name in hdulist[1].header.keys():
                continue
            value = hdulist[1].header[field_name]
            Map.set_field(field_name, value)
        map_list.append(Map)
    if len(map_list) == 1:
        map_list = map_list[0]

    return map_list
Example #37
def read(file_name, map_inds=None, feedback=2):
    """Read a map from an image fits file.
    """

    fname_abbr = ku.abbreviate_file_path(file_name)
    if feedback > 0:
        print "Opening file: " + fname_abbr
    # Open the fits file.
    hdulist = pyfits.open(file_name)
    history = bf.get_history_header(hdulist[0].header)
    history.add("Read from file.", "File name: " + fname_abbr)
    map_list = []

    if map_inds is None:
        map_inds = range(1, len(hdulist))
    elif not hasattr(map_inds, "__iter__"):
        map_inds = (map_inds,)
    elif len(map_inds) == 0:
        map_inds = range(1, len(hdulist))

    for ii in map_inds:
        data = hdulist[ii].data

        # Set the data attribute
        Map = data_map.DataMap()
        Map.set_data(sp.swapaxes(data, 0, 2))
        # Masked data is stored in FITS files as float('nan')
        Map.data[sp.logical_not(sp.isfinite(Map.data))] = ma.masked
        Map.history = history

        # Set the other fields.
        for field_name in fields:
            if not field_name in hdulist[1].header.keys():
                continue
            value = hdulist[1].header[field_name]
            Map.set_field(field_name, value)
        map_list.append(Map)
    if len(map_list) == 1:
        map_list = map_list[0]

    return map_list
Example #38
    def test_off_map(self):
        Data = self.blocks[0]
        Data.calc_freq()
        map = self.map
        map[:, :, :] = 0.0
        Data.data[:, :, :, :] = 0.0

        # Rig the pointing but put one off the map.
        def rigged_pointing():
            Data.ra = map.get_axis('ra')[range(10)]
            Data.dec = map.get_axis('dec')[range(10)]
            Data.ra[3] = Data.ra[3] - 8.0

        Data.calc_pointing = rigged_pointing
        smd.sub_map(Data, map)
        self.assertTrue(sp.alltrue(ma.getmaskarray(Data.data[3, :, :, :])))
        self.assertTrue(
            sp.alltrue(
                sp.logical_not(
                    ma.getmaskarray(
                        (Data.data[[0, 1, 2, 4, 5, 6, 7, 8, 9], :, :, :])))))
Example #39
def gen(conn, tedata, data, idx, time, shot, name='shots2016'):

    # Pull the GIW pedestal and core Te/ne time series for this shot
    tped = goodGIW(time, shot, name="t_e_ped")
    tcore = goodGIW(time, shot, name="t_e_core")

    nped = goodGIW(time, shot, name="n_e_ped")
    ncore = goodGIW(time, shot, name="n_e_core")

    output = scipy.zeros(data.shape)

    for i in xrange(len(time)):
        print(i),
        ne = GIWprofiles.ne(GIWprofiles.te2rho2(tedata, tcore[i], tped[i]),
                            ncore[i], nped[i])
        ne[scipy.logical_not(scipy.isfinite(ne))] = 0.

        output[i] = data[i] * ne
        #multiply for new data

    writeData(idx, output, conn, name)
Example #40
            'NT': nt,
            'RTMAX': rt_max,
            'RPMIN': rp_min,
            'RPMAX': rp_max
        }
        h.close()

    ### same header
    for i in range(nbData):
        for k in ['NSIDE', 'HLPXSCHM', 'NP', 'NT', 'RTMAX', 'RPMIN', 'RPMAX']:
            assert data[i][k] == data[0][k]

    ### Add unshared healpix as empty data
    for i in range(nbData):
        for j in range(nbData):
            w = sp.logical_not(sp.in1d(data[j]['HEALPID'], data[i]['HEALPID']))
            if w.sum() > 0:
                new_healpix = data[j]['HEALPID'][w]
                nb_new_healpix = new_healpix.size
                nb_bins = data[i]['DA'].shape[1]
                print("Some healpix are unshared in data {} vs. {}: {}".format(
                    i, j, new_healpix))
                data[i]['DA'] = sp.append(data[i]['DA'],
                                          sp.zeros((nb_new_healpix, nb_bins)),
                                          axis=0)
                data[i]['WE'] = sp.append(data[i]['WE'],
                                          sp.zeros((nb_new_healpix, nb_bins)),
                                          axis=0)
                data[i]['HEALPID'] = sp.append(data[i]['HEALPID'], new_healpix)

    ### Sort the data by the healpix values
Example #41
        data['CO'] = sp.diag(co)
    else:
        print('INFO: Compute covariance from sub-sampling')

        ### To have same number of HEALPix
        for d1 in list(lst_file.keys()):
            for d2 in list(lst_file.keys()):

                if data[d1]['NSIDE']!=data[d2]['NSIDE']:
                    print('ERROR: NSIDE are different: {} != {}'.format(data[d1]['NSIDE'],data[d2]['NSIDE']))
                    sys.exit()
                if data[d1]['HLPXSCHM']!=data[d2]['HLPXSCHM']:
                    print('ERROR: HLPXSCHM are different: {} != {}'.format(data[d1]['HLPXSCHM'],data[d2]['HLPXSCHM']))
                    sys.exit()

                w = sp.logical_not( sp.in1d(data[d1]['HEALPID'],data[d2]['HEALPID']) )
                if w.sum()!=0:
                    print('WARNING: HEALPID are different by {} for {}:{} and {}:{}'.format(w.sum(),d1,data[d1]['HEALPID'].size,d2,data[d2]['HEALPID'].size))
                    new_healpix = data[d1]['HEALPID'][w]
                    nb_new_healpix = new_healpix.size
                    nb_bins = data[d2]['WE'].shape[1]
                    data[d2]['HEALPID'] = sp.append(data[d2]['HEALPID'],new_healpix)
                    data[d2]['WE'] = sp.append(data[d2]['WE'],sp.zeros((nb_new_healpix,nb_bins)),axis=0)

        ### Sort the data by the healpix values
        for d1 in list(lst_file.keys()):
            sort = sp.array(data[d1]['HEALPID']).argsort()
            data[d1]['WE'] = data[d1]['WE'][sort]
            data[d1]['HEALPID'] = data[d1]['HEALPID'][sort]

        if corr=='AUTO':
Example #42
data = data.fillna(data.median(axis=0), axis=0)
data_describe = data.describe(include=[object])
for c in categorical_columns:
    data[c] = data[c].fillna(data_describe[c]['top'])
'''
data_describe = data.describe(include=[object])
binary_columns    = [c for c in categorical_columns if data_describe[c]['unique'] == 2]
nonbinary_columns = [c for c in categorical_columns if data_describe[c]['unique'] > 2]
print('Binary columns:', binary_columns, 'Nonbinary columns: ', nonbinary_columns)


for c in binary_columns:
    top = data_describe[c]['top']
    top_items = data[c] == top
    data.loc[top_items, c] = 0
    data.loc[np.logical_not(top_items), c] = 1
    
'''
Replace feature A4 with three features: A4_u, A4_y, A4_l.

If feature A4 takes the value u, then A4_u is 1, A4_y is 0, A4_l is 0.
If feature A4 takes the value y, then A4_u is 0, A4_y is 1, A4_l is 0.
If feature A4 takes the value l, then A4_u is 0, A4_y is 0, A4_l is 1.
'''
data_nonbinary = pd.get_dummies(data[nonbinary_columns])

#scaler = StandardScaler()
#data_numerical = data[numerical_columns]
#scaler.fit_transform(data_numerical)

data_numerical = data[numerical_columns]
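A toy illustration of the A4 one-hot replacement described above (hypothetical values, just to show what pd.get_dummies produces):

import pandas as pd

toy = pd.DataFrame({'A4': ['u', 'y', 'l', 'u']})
dummies = pd.get_dummies(toy['A4'], prefix='A4')
# dummies has columns A4_l, A4_u, A4_y; each row has a single 1/True in the
# column matching the original A4 value and 0/False in the other two.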
Example #43
def sub_map(Data, Maps, correlate=False, pols=(), make_plots=False,
            interpolation='nearest') :
    """Subtracts a Map out of Data."""
    
    # Import locally since many machines don't have matplotlib.
    if make_plots :
        import matplotlib.pyplot as plt
    
    # Convert pols to an iterable.
    if pols is None :
        pols = range(Data.dims[1])
    elif not hasattr(pols, '__iter__') :
        pols = (pols, )
    elif len(pols) == 0 :
        pols = range(Data.dims[1])
    # If solving for gains, need a place to store them.
    if correlate :
        out_gains = sp.empty((len(pols),) + Data.dims[2:4])
    for pol_ind in pols :
        # Check if there one map was passed or multiple.
        if isinstance(Maps, list) or isinstance(Maps, tuple) :
            if len(Maps) != len(pols) :
                raise ValueError("Must provide one map, or one map per "
                                 "polarization.")
            Map = Maps[pol_ind]
        else :
            Map = Maps
        if not Map.axes == ('freq', 'ra', 'dec') :
            raise ValueError("Expected map axes to be ('freq', 'ra', 'dec').")
        Data.calc_pointing()
        Data.calc_freq()
        # Map Parameters.
        centre = (Map.info['freq_centre'], Map.info['ra_centre'],
                  Map.info['dec_centre'])
        shape = Map.shape
        spacing = (Map.info['freq_delta'], Map.info['ra_delta'], 
                   Map.info['dec_delta'])
        # Nearest code is deprecated.  We could just use the general code.
        if interpolation == 'nearest' :
            # These indices are the length of the time axis. Integer indices.
            ra_ind = map.tools.calc_inds(Data.ra, centre[1], shape[1],
                                         spacing[1])
            dec_ind = map.tools.calc_inds(Data.dec, centre[2], shape[2],
                                          spacing[2])
            # Exclude indices that are off map or out of band. Boolean indices.
            on_map_inds = sp.logical_and(
                                 sp.logical_and(ra_ind>=0, ra_ind<shape[1]),
                                 sp.logical_and(dec_ind>=0, dec_ind<shape[2]))
            # Make an array of map data the size of the time stream data.
            submap = Map[:, ra_ind[on_map_inds], dec_ind[on_map_inds]]
        else :
            map_ra = Map.get_axis('ra')
            map_dec = Map.get_axis('dec')
            on_map_inds = sp.logical_and(
                sp.logical_and(Data.ra > min(map_ra), Data.ra < max(map_ra)),
                sp.logical_and(Data.dec > min(map_dec), Data.dec<max(map_dec)))
            submap = sp.empty((Map.shape[0], sp.sum(on_map_inds)), dtype=float)
            jj = 0
            for ii in range(len(on_map_inds)) :
                if on_map_inds[ii] :
                    submap[:, jj] = Map.slice_interpolate([1, 2], 
                            [Data.ra[ii], Data.dec[ii]], kind=interpolation)
                    jj += 1
        # Length of the data frequency axis.
        freq_ind = map.tools.calc_inds(Data.freq, centre[0], shape[0], 
                                       spacing[0])
        in_band_inds = sp.logical_and(freq_ind >= 0, freq_ind < shape[0])
        submap = submap[freq_ind[in_band_inds], ...]
        # Broadcast to the same shape and combine.
        covered_inds = sp.logical_and(on_map_inds[:, sp.newaxis], 
                                      in_band_inds[sp.newaxis, :])
        # submap is the size of the data that is on the map.  Expand to full 
        # size of data.
        subdata = sp.zeros(sp.shape(covered_inds))
        subdata[covered_inds] = sp.rollaxis(submap, 1, 0).flatten()
        subdata[sp.logical_not(covered_inds)] = 0.0
        # Now start using the actual data.  Loop over cal and pol indices.
        for cal_ind in range(Data.dims[2]) :
            data = Data.data[:,pol_ind, cal_ind, :]
            data[sp.logical_not(covered_inds)] = ma.masked
            # Find the common good indices.
            un_mask = sp.logical_not(data.mask)
            # Find the number of good indices at each frequency.
            counts = sp.sum(un_mask, 0)
            counts[counts == 0] = -1
            # Subtract out the mean from the map.
            tmp_subdata = (subdata - sp.sum(un_mask*subdata, 0)/counts)
            # Correlate to solve for an unknown gain.
            if correlate :
                tmp_data = data.filled(0.0)
                tmp_data = (tmp_data - sp.sum(un_mask*data, 0)
                            / counts)
                gain = (sp.sum(un_mask*tmp_subdata*tmp_data, 0) / 
                        sp.sum(un_mask*tmp_subdata*tmp_subdata, 0))
                gain[counts == -1] = 0.0
                out_gains[pol_ind,cal_ind,:] = gain
            else :
                gain = 1.0
            # Now do the subtraction and mask the off map data.  We use the
            # mean subtracted map, to preserve data mean.
            if make_plots :
                plt.figure()
                plt.plot(ma.mean((gain*tmp_subdata), -1), '.b')
                plt.plot(ma.mean((tmp_subdata), -1), '.r')
                plt.plot(ma.mean((data - ma.mean(data, 0)), -1), '.g')
                #plt.plot(ma.mean((data), -1), '.g')
                #plt.plot((gain*tmp_subdata)[:, 45], '.b')
                #plt.plot((data - ma.mean(data, 0))[:, 45], '.g')
            data[...] -= gain*tmp_subdata
    if correlate :
        return out_gains
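
# Hedged sketch of the nearest-pixel lookup that map.tools.calc_inds is assumed
# to perform in sub_map above (calc_inds_sketch is a hypothetical stand-in, not
# the package's implementation): convert sky coordinates to integer map indices
# given the map centre, pixel count and pixel spacing.
import numpy as np

def calc_inds_sketch(coords, centre, n_pix, spacing):
    """Integer pixel indices; values outside [0, n_pix) mean off the map."""
    coords = np.asarray(coords, dtype=float)
    return np.round((coords - centre) / spacing + (n_pix - 1) / 2.0).astype(int)

ra_ind = calc_inds_sketch([325.0, 325.4, 327.9], centre=325.4, n_pix=64, spacing=0.07)
on_map = np.logical_and(ra_ind >= 0, ra_ind < 64)   # Boolean mask, as in sub_map
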
Пример #44
0
def multiply_by_cal(Data, CalData) :
    """Function scales data by the noise cal temperature.
    """

    # For now we just assume that the cal and polarizations are arranged in a
    # certain way and then check to make sure we are right.
    calibrate_to_I = False
    if tuple(Data.field['CRVAL4']) == (-5, -7, -8, -6) :
        xx_ind = 0
        yy_ind = 3
        xy_inds = [1,2]
    elif tuple(Data.field['CRVAL4']) == (1, 2, 3, 4) :
        # This is a hack.  Completely temporary.
        calibrate_to_I = True
    else :
        raise ce.DataError('Polarization types not as expected in data.')

    cal_xx_ind = 0
    cal_yy_ind = 1
    if (CalData.field['CRVAL4'][cal_xx_ind] != -5 or
        CalData.field['CRVAL4'][cal_yy_ind] != -6) :
            raise ce.DataError('Polarization types not as expected in cal.')

    # Cal should only have 1 time, 1 cal state and 2 polarizations.
    if CalData.dims[:3] != (1,2,1) :
        raise ce.DataError('Cal temperature data has wrong dimensions.')

    # Cal state should be special state 'R'.
    if CalData.field['CAL'][0] != 'R' :
        raise ce.DataError("Cal state in cal temperture data should be "
                           "'R'.")

    # Bring the Cal data to the same frequencies as the other data.
    Data.calc_freq()
    CalData.calc_freq()
    if sp.allclose(Data.freq, CalData.freq) :
        cdata = CalData.data
    elif abs(Data.field['CDELT1']) <= abs(CalData.field['CDELT1']) :
        calfunc = interpolate.interp1d(CalData.freq, CalData.data, 
                                  fill_value=sp.nan, bounds_error=False)
        cdata = ma.array(calfunc(Data.freq))
        cdata[sp.logical_not(sp.isfinite(cdata))] = ma.masked
    else :
        nf = len(Data.freq)
        width = abs(Data.field['CDELT1'])
        cdata = ma.empty((1,2,1,nf))
        for find in range(nf) :
            f = Data.freq[find]
            inds, = sp.where(sp.logical_and(CalData.freq >= f - width/2.0,
                                           CalData.freq < f + width/2.0))
            cdata[:,:,:,find] = ma.mean(CalData.data[:,:,:,inds], 3)
    
    if calibrate_to_I :
        Data.data *= (cdata[0,cal_xx_ind,0,:] + cdata[0,cal_yy_ind,0,:])/2.0
    else :
        # Loop over times and cal and scale each polarization appropriately.
        for tind in range(Data.dims[0]) :
            for cind in range(Data.dims[2]) :
                Data.data[tind,xx_ind,cind,:] *= cdata[0,cal_xx_ind,0,:]
                Data.data[tind,yy_ind,cind,:] *= cdata[0,cal_yy_ind,0,:]
                Data.data[tind,xy_inds,cind,:] *= ma.sqrt(
                     cdata[0,cal_yy_ind,0,:] * cdata[0,cal_xx_ind,0,:])
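
# Hedged numpy/scipy sketch of the frequency-matching branch in multiply_by_cal
# above: interpolate the cal spectrum when the data channels are narrower, or
# bin-average it when the data channels are wider.  Stand-alone toy helper, not
# the DataBlock machinery.
import numpy as np
from scipy import interpolate

def match_cal_to_data(cal_freq, cal_temp, data_freq, data_dfreq, cal_dfreq):
    if abs(data_dfreq) <= abs(cal_dfreq):
        # Data channels are narrower: interpolate the cal spectrum onto them.
        f = interpolate.interp1d(cal_freq, cal_temp, bounds_error=False,
                                 fill_value=np.nan)
        return f(data_freq)
    # Data channels are wider: average the cal samples falling inside each one.
    out = np.empty(len(data_freq))
    for i, f0 in enumerate(data_freq):
        sel = np.logical_and(cal_freq >= f0 - abs(data_dfreq) / 2.0,
                             cal_freq < f0 + abs(data_dfreq) / 2.0)
        out[i] = cal_temp[sel].mean() if sel.any() else np.nan
    return out
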
Пример #45
0
    def doTestEdtWithHalo(self, haloSz=0):
        if (isinstance(haloSz, int) or ((sys.version_info.major < 3) and isinstance(haloSz, long))):
            if (haloSz < 0):
                haloSz = 0
            haloSz = sp.array((haloSz,)*3)
        
        subDirName = "doTestEdtWithHalo_%s" % ("x".join(map(str, haloSz)), )
        outDir = self.createTmpDir(subDirName)
        #outDir = subDirName
        if (mpi.world != None):
            if ((mpi.world.Get_rank() == 0) and (not os.path.exists(outDir))):
                os.makedirs(outDir)
            mpi.world.barrier()

        segDds = mango.zeros(self.imgShape, mtype="segmented", halo=haloSz)
        segDds.setAllToValue(segDds.mtype.maskValue())
        mango.data.fill_ellipsoid(segDds, centre=self.centre, radius=(self.radius*1.05,)*3, fill=0)
        mango.data.fill_ellipsoid(segDds, centre=self.centre, radius=(self.radius,)*3, fill=1)
        mango.io.writeDds(os.path.join(outDir,"segmentedSphere.nc"), segDds)
        dtDds = mango.image.distance_transform_edt(segDds, 1)
        mango.io.writeDds(os.path.join(outDir,"distance_mapSphereEdt.nc"), dtDds)
        segDds.updateHaloRegions()
        dtDds.updateHaloRegions()
        segDds.setBorderToValue(segDds.mtype.maskValue())
        dtDds.setBorderToValue(dtDds.mtype.maskValue())
        
        slc = []
        for d in range(len(haloSz)):
            slc.append(slice(haloSz[d], segDds.asarray().shape[d]-haloSz[d]))
        
        slc = tuple(slc)

        arr = dtDds.subd_h.asarray()
        sbeg = dtDds.subd_h.origin
        send = sbeg + dtDds.subd_h.shape
        
        coords = np.ogrid[sbeg[0]:send[0],sbeg[1]:send[1],sbeg[2]:send[2]]
    
        distDds = mango.copy(dtDds)
        distArr = distDds.subd_h.asarray()
        distArr[...] = \
            sp.where(
                sp.logical_and(dtDds.subd_h.asarray() != dtDds.mtype.maskValue(), dtDds.subd_h.asarray() >= 0),
                self.radius
                -
                sp.sqrt(
                    ((coords[0]-self.centre[0])**2)
                    +
                    ((coords[1]-self.centre[1])**2)
                    +
                    ((coords[2]-self.centre[2])**2)
                ),
                dtDds.subd_h.asarray()
            )
        mango.io.writeDds(os.path.join(outDir,"distance_mapSphereDistRef.nc"), distDds)
    
        rootLogger.info("max diff = %s" % (np.max(sp.absolute(distArr - dtDds.subd_h.asarray())),))
        self.assertTrue(
            sp.all(
                sp.absolute(distArr - dtDds.subd_h.asarray())
                <=
                1.0
            )
        )
        self.assertTrue(
            sp.all(
                sp.logical_not(sp.logical_xor(
                    segDds.asarray() == 1,
                    dtDds.asarray()  >=  0
                ))
            )
        )

        self.assertTrue(
            sp.all(
                sp.logical_not(sp.logical_xor(
                    segDds.asarray() == segDds.mtype.maskValue(),
                    dtDds.asarray()  == dtDds.mtype.maskValue()
                ))
            )
        )

        self.assertTrue(
            sp.all(
                sp.logical_not(sp.logical_xor(
                    segDds.asarray() == 0,
                    dtDds.asarray()  == -1
                ))
            )
        )
        
        self.assertTrue(sp.all(segDds.halo == dtDds.halo))
        self.assertTrue(sp.all(segDds.shape == dtDds.shape))
        self.assertTrue(sp.all(segDds.origin == dtDds.origin), "%s != %s" % (segDds.origin, dtDds.origin))
        self.assertTrue(sp.all(segDds.mpi.shape == dtDds.mpi.shape))
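
# Hedged sketch of the check performed by the test above, using scipy.ndimage
# instead of mango: inside a digitised sphere the Euclidean distance transform
# should agree with the analytic distance to the surface to within about a voxel.
import numpy as np
from scipy import ndimage

shape, centre, radius = (64, 64, 64), np.array([32.0, 32.0, 32.0]), 20.0
zz, yy, xx = np.ogrid[:shape[0], :shape[1], :shape[2]]
r = np.sqrt((zz - centre[0])**2 + (yy - centre[1])**2 + (xx - centre[2])**2)
sphere = r <= radius

edt = ndimage.distance_transform_edt(sphere)   # distance to the nearest outside voxel
analytic = np.where(sphere, radius - r, 0.0)   # exact distance to the sphere surface
print("max diff =", np.abs(edt - analytic).max())   # expected to be about one voxel
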
Пример #46
0
def invertRSTO(RSTO,Iono,alpha_list=1e-2,invtype='tik',rbounds=[100,200],Nlin=0):
    """ This will run the inversion program given an ionocontainer, an alpha and """
    
    nlout,ntout,nl=Iono.Param_List.shape
    if Nlin !=0:
        nl=Nlin
    
    nlin=len(RSTO.Cart_Coords_In)
    time_out=RSTO.Time_Out
    time_in=RSTO.Time_In
    overlaps = RSTO.overlaps
    xin,yin,zin=RSTO.Cart_Coords_In.transpose()
    z_u=sp.unique(zin)
    rplane=sp.sqrt(xin**2+yin**2)*sp.sign(xin)
    r_u=sp.unique(rplane)
    n_z=z_u.size
    n_r=r_u.size
    dims= [n_r,n_z]
    
    rin,azin,elin=RSTO.Sphere_Coords_In.transpose()
    
    anglist=RSTO.simparams['angles']
    ang_vec=sp.array([[i[0],i[1]] for i in anglist])
    
    # trim out cruft
    
    zmin,zmax=[150,500]
    rpmin,rpmax=rbounds#[-50,100]#[100,200]
    altlog= sp.logical_and(zin>zmin,zin<zmax)
    rplog=sp.logical_and(rplane>rpmin,rplane<rpmax)
    allrng= RSTO.simparams['Rangegatesfinal']
    dR=allrng[1]-allrng[0]
    nldir=sp.ceil(int(nl)/2.)
    posang_log1= sp.logical_and(ang_vec[:,0]<=180.,ang_vec[:,0]>=0)
    negang_log1 = sp.logical_or(ang_vec[:,0]>180.,ang_vec[:,0]<0)
    azin_pos = sp.logical_and(azin<=180.,azin>=0)
    azin_neg = sp.logical_or(azin>180.,azin<0)
    minangpos=0
    minangneg=0
    
    
    if sp.any(posang_log1):
        minangpos=ang_vec[posang_log1,1].min()
    if sp.any(negang_log1):
        minangneg=ang_vec[negang_log1,1].min()
    
    rngbounds=[allrng[0]-nldir*dR,allrng[-1]+nldir*dR]
    rng_log=sp.logical_and(rin>rngbounds[0],rin<rngbounds[1])
    elbounds_pos=sp.logical_and(azin_pos,elin>minangpos)
    elbounds_neg=sp.logical_and(azin_neg,elin>minangneg)
    
    elbounds=sp.logical_or(elbounds_pos,elbounds_neg)
    keeplog=sp.logical_and(sp.logical_and(rng_log,elbounds),sp.logical_and(altlog,rplog))
    keeplist=sp.where(keeplog)[0]
    nlin_red=len(keeplist)
    # set up derivative matrix
    dx,dy=diffmat(dims)
    dx_red=dx[keeplist][:,keeplist]
    dy_red=dy[keeplist][:,keeplist]
    # need the sparse vstack to make sure things stay sparse
    D=sp.sparse.vstack((dx_red,dy_red))
    # New parameter matrix
    new_params=sp.zeros((nlin,len(time_out),nl),dtype=Iono.Param_List.dtype)
    if isinstance(alpha_list,numbers.Number):
        alpha_list=[alpha_list]*nl
    ave_datadif=sp.zeros((len(time_out),nl))
    ave_data_const = sp.zeros_like(ave_datadif)
    q=1e10
    for itimen, itime in enumerate(time_out):
        print('Making Outtime {0:d} of {1:d}'.format(itimen+1,len(time_out)))
        #allovers=overlaps[itimen]
        #curintimes=[i[0] for i in allovers]
        #for it_in_n,it in enumerate(curintimes):
        #print('\t Making Intime {0:d} of {1:d}'.format(it_in_n+1,len(curintimes)))
        #A=RSTO.RSTMat[itimen*nlout:(itimen+1)*nlout,it*nlin:(it+1)*nlin]
        A=RSTO.RSTMat[itimen*nlout:(itimen+1)*nlout,itimen*nlin:(itimen+1)*nlin]
        Acvx=cvx.Constant(A[:,keeplist])
        for ip in range(nl):
            alpha=alpha_list[ip]*2
            print('\t\t Making Lag {0:d} of {1:d}'.format(ip+1,nl))
            datain=Iono.Param_List[:,itimen,ip]
            xr=cvx.Variable(nlin_red)
            xi=cvx.Variable(nlin_red)
            if invtype.lower()=='tik':
                constr=alpha*cvx.norm(xr,2)
                consti=alpha*cvx.norm(xi,2)
            elif invtype.lower()=='tikd':
                constr=alpha*cvx.norm(D*xr,2)
                consti=alpha*cvx.norm(D*xi,2)
            elif invtype.lower()=='tv':
                constr=alpha*cvx.norm(D*xr,1)
                consti=alpha*cvx.norm(D*xi,1)
            br=datain.real/q
            bi=datain.imag/q
            if ip==0:
                objective=cvx.Minimize(cvx.norm(Acvx*xr-br,2)+constr)
                constraints= [xr>=0]
                prob=cvx.Problem(objective,constraints)
                result=prob.solve(verbose=True,solver=cvx.SCS,use_indirect=True,max_iters=4000)
#                    new_params[keeplog,it,ip]=xr.value.flatten()
                xcomp=sp.array(xr.value).flatten()*q
            else:
                objective=cvx.Minimize(cvx.norm(Acvx*xr-br,2)+constr)
                prob=cvx.Problem(objective)
                result=prob.solve(verbose=True,solver=cvx.SCS,use_indirect=True,max_iters=4000)
                
                objective=cvx.Minimize(cvx.norm(Acvx*xi-bi,2)+consti)
                prob=cvx.Problem(objective)
                result=prob.solve(verbose=True,solver=cvx.SCS,use_indirect=True,max_iters=4000)
                xcomp=sp.array(xr.value + 1j*xi.value).flatten()*q
#                    new_params[keeplog,it,ip]=xcomp
            new_params[keeplog,itimen,ip]=xcomp
            ave_datadif[itimen,ip]=sp.sqrt(sp.nansum(sp.absolute(A[:,keeplist].dot(xcomp)-datain)**2))
            if invtype.lower()=='tik':
                sumconst=sp.sqrt(sp.nansum(sp.power(sp.absolute(xcomp),2)))
            elif invtype.lower()=='tikd':
                dx=D.dot(xcomp)
                sumconst=sp.sqrt(sp.nansum(sp.power(sp.absolute(dx),2)))
            elif invtype.lower()=='tv':
                dx=D.dot(xcomp)
                sumconst=sp.nansum(sp.absolute(dx))
            ave_data_const[itimen,ip]=sumconst
            # set up nans                    
            new_params[sp.logical_not(keeplog),itimen]=sp.nan
    datadif=sp.nanmean(ave_datadif,axis=0)
    constval=sp.nanmean(ave_data_const,axis=0)
    ionoout=IonoContainer(coordlist=RSTO.Cart_Coords_In,paramlist=new_params,times = time_out,sensor_loc = sp.zeros(3),ver =0,coordvecs =
        ['x','y','z'],paramnames=Iono.Param_Names[:Nlin])
        
    return (ionoout,datadif,constval)
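
# Hedged, self-contained sketch of the Tikhonov-regularised least-squares step
# used inside invertRSTO above, written against the current cvxpy API (the
# function above uses the older operator style).  A, b and alpha are toy values.
import numpy as np
import cvxpy as cp

rng = np.random.default_rng(0)
A = rng.standard_normal((40, 20))
x_true = np.abs(rng.standard_normal(20))
b = A @ x_true + 0.01 * rng.standard_normal(40)
alpha = 1e-2

x = cp.Variable(20)
objective = cp.Minimize(cp.norm(A @ x - b, 2) + alpha * cp.norm(x, 2))  # 'tik'
prob = cp.Problem(objective, [x >= 0])        # non-negativity, as for the zero lag
prob.solve(solver=cp.SCS)
print(prob.status, np.linalg.norm(x.value - x_true))
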
Пример #47
0
    def read(self, scans=None, IFs=None, force_tuple=False) :
        """Read in data from the fits file.

        This method reads data from the fits file including the files history
        and basically every peice of data that could be needed.  It is done,
        one scan and one IF at a time.  Each scan and IF is returned in an
        instance of the DataBlock class (defined in another module of this
        package).

        Arguments:
            scans: Which scans in the file to be processed.  A list of 
                integers, with 0 corresponding to the lowest numbered scan.
                Default is all of them.
            IFs: Which intermediate frequencies (also called frequency windows)
                to process.  A list of integers with 0 corresponding to the
                lowest frequency present. Default is all of them.
                TODO : Overlapping frequency windows stitched together somehow.
            force_tuple: By default, if there is only a single output Data
                Block, it is returned not wrapped in a tuple, but if we want to
                loop over the output we can force the output to be a tuple,
                even if it only has one element.

        Returns: Instance of the DataBlock class, or a tuple of these instances
        (if asked for multiple scans and IFs).
        """
        
        # We want scans and IFs to be a sequence of indices.
        if scans is None :
            scans = range(len(self.scan_set))
        elif not hasattr(scans, '__iter__') :
            scans = (scans, )
        elif len(scans) == 0 :
            scans = range(len(self.scan_set))
        if IFs is None :
            IFs = range(len(self.IF_set))
        elif not hasattr(IFs, '__iter__') :
            IFs = (IFs, )
        elif len(IFs) == 0 :
            IFs = range(len(self.IF_set))
        
        # Sequence of output DataBlock objects.
        output = ()
        for scan_ind in scans :
            for IF_ind in IFs :
                # Choose the appropriate records from the file, get that data.
                inds_sif = self.get_scan_IF_inds(scan_ind, IF_ind)
                Data_sif = db.DataBlock(self.fitsdata.field('DATA')[inds_sif])
                # Masked data is stored in FITS files as float('nan')
                Data_sif.data[sp.logical_not(sp.isfinite(
                                   Data_sif.data))] = ma.masked
                # Now iterate over the fields and add them
                # to the data block.
                for field, axis in fields_and_axes.iteritems() :
                    # See if this fits file has the key we are looking for.
                    if not field in self.fitsdata._names :
                        continue
                    # First get the 'FITS' format string.
                    field_format = self.hdulist[1].columns.formats[
                                self.hdulist[1].columns.names.index(field)]
                    if axis :
                        # From the indices in inds_sif, we only need a
                        # subset: which_data will subscript inds_sif.
                        temp_data = self.fitsdata.field(field)[inds_sif]
                        # For reshaping at the end.
                        field_shape = []
                        for ii, single_axis in enumerate(Data_sif.axes[0:-1]) :
                            # For each axis, slice out all the data except the
                            # stuff we need.
                            which_data = [slice(None)] * 3
                            if single_axis in axis :
                                field_shape.append(Data_sif.dims[ii])
                            else :
                                which_data[ii] = [0]
                            temp_data = temp_data[tuple(which_data)]
                        temp_data.shape = tuple(field_shape)
                        Data_sif.set_field(field, temp_data, axis, field_format)
                    else :
                        Data_sif.set_field(field, self.fitsdata.field(field)
                            [inds_sif[0,0,0]], axis, field_format)
                if hasattr(self, 'history') :
                    Data_sif.history = db.History(self.history)
                else :
                    self.history =bf.get_history_header(self.hdulist[0].header)
                    #self.set_history(Data_sif)
                    fname_abbr = ku.abbreviate_file_path(self.fname)
                    self.history.add('Read from file.', ('File name: ' + 
                                         fname_abbr, ))
                    Data_sif.history = db.History(self.history)
                Data_sif.verify()
                output = output + (Data_sif, )
        if self.feedback > 2 :
            print 'Read finished.'
        if len(output) == 1 and not force_tuple :
            return output[0]
        else :
            return output
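
# Hedged mini-sketch of the masking convention used in read() above: flagged
# samples arrive as NaN (or inf) and are converted to masked entries of a
# numpy masked array before further processing.
import numpy as np
import numpy.ma as ma

raw = np.array([1.0, np.nan, 3.0, np.inf])
data = ma.array(raw)
data[np.logical_not(np.isfinite(raw))] = ma.masked
print(data)          # [1.0 -- 3.0 --]
print(data.mean())   # 2.0, computed over the unmasked samples only
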
Пример #48
0
def removeQuadOff(input_unw_path, width, length):

	ramp_removed_unw_path = "ramp_removed_" + input_unw_path[input_unw_path.rfind("/") + 1 : input_unw_path.rfind(".")] + ".unw";

	assert not os.path.exists(ramp_removed_unw_path), "\n***** ERROR: " + ramp_removed_unw_path + " already exists, exiting...\n";

	import subprocess;

	mag = input_unw_path[input_unw_path.rfind("/") + 1 : input_unw_path.rfind(".")] + ".mag";
	phs = input_unw_path[input_unw_path.rfind("/") + 1 : input_unw_path.rfind(".")] + ".phs";
	
	cmd  = "\nrmg2mag_phs " + input_unw_path + " " + mag + " " + phs + " " + width + "\n";
	cmd += "\nrm " + mag + "\n";
	subprocess.call(cmd,shell=True);

	infile = open(phs, "rb");

	import pylab;

	indat = pylab.fromfile(infile,pylab.float32,-1).reshape(int(length), int(width));
	#indat = pylab.fromfile(infile,pylab.float32,-1).reshape(int(width) * int(length), -1);

	infile.close();

	import scipy;

	x = scipy.arange(0, int(length));
	y = scipy.arange(0, int(width));

	import numpy;

	x_grid, y_grid = numpy.meshgrid(x, y);

	indices = numpy.arange(0, int(width) * int(length));

	mx = scipy.asarray(x_grid).reshape(-1);
	my = scipy.asarray(y_grid).reshape(-1);
	d  = scipy.asarray(indat).reshape(-1); 

	nonan_ids = indices[scipy.logical_not(numpy.isnan(d))];

	mx = mx[nonan_ids];
	my = my[nonan_ids];
	d  = d[nonan_ids];

	init_mx      = scipy.asarray(x_grid).reshape(-1)[nonan_ids];
	init_my      = scipy.asarray(y_grid).reshape(-1)[nonan_ids];
	#ramp_removed = scipy.asarray(indat).reshape(-1)[nonan_ids];
	ramp_removed = scipy.zeros(int(length) * int(width))[nonan_ids];
	init_m_ones  = scipy.ones(int(length) * int(width))[nonan_ids];

#	init_xs = [init_m_ones, init_mx, init_my, scipy.multiply(init_mx,init_my), scipy.power(init_mx,2), scipy.power(init_my,2)];
	init_xs = [init_mx];

	print(len(init_xs));

	p0 = scipy.zeros(len(init_xs));
	p  = scipy.zeros(len(init_xs));

	import scipy.optimize;

	for i in scipy.arange(0,10):

		m_ones = scipy.ones(scipy.size(mx));

#		xs     = [m_ones, mx, my, scipy.multiply(mx,my), scipy.power(mx,2), scipy.power(my,2)];
		xs     = [mx];

		G      = scipy.vstack(xs).T;
		print(mx);
		plsq   = scipy.optimize.leastsq(residuals, p0, args = (d, xs));

		res    = d - peval(xs, plsq[0]);
		mod    = plsq[0];

		p = p + mod;
		print(plsq[0]);

#		synth  = G * scipy.matrix(mod).T;
#		cutoff = res.std(axis=0,ddof=1);
		#print(cutoff);

#		indices  = numpy.arange(0, numpy.size(mx));
#		good_ids = indices[abs(res) <= cutoff];

#		plt.figure(i + 2);
#		plt.plot(mx,d,'b.',label='alloff');
#		plt.plot(mx[good_ids],synth[good_ids],'.',label='fit',color='lightgreen');
#		plt.plot(mx[bad_ids],d[bad_ids],'r.',label='cull #' + str(i + 1));
#		plt.legend();
 
#		mx = mx[good_ids];
#		my = my[good_ids];
#		d  = res[good_ids];

		d  = res;
		print(sum(res));

#		ramp_removed = scipy.asarray(ramp_removed - peval(init_xs, plsq[0]));
		ramp_removed = scipy.asarray(ramp_removed + peval(init_xs, plsq[0]));

	d = scipy.asarray(indat).reshape(-1);

	for i in range(0, scipy.size(nonan_ids)):
	
		d[nonan_ids[i]] = ramp_removed[i];

	ramp_removed = d.reshape(int(length), int(width));

#	import matplotlib;
#	matplotlib.pyplot.imshow(scipy.array(indat),interpolation='nearest',origin='lower');
#	matplotlib.pyplot.show();

	outfile = open(ramp_removed_unw_path, "wb");

	outoff = scipy.matrix(scipy.hstack((ramp_removed, ramp_removed)), scipy.float32);

	outoff.tofile(outfile);

	outfile.close();
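
# Hedged plain-numpy sketch of the ramp removal performed above: fit a linear
# ramp a + b*x to the finite phase samples by least squares and subtract it.
# The toy array, coefficients and hole pattern are made up for illustration.
import numpy as np

length, width = 100, 120
x_grid, y_grid = np.meshgrid(np.arange(length), np.arange(width))
phase = 0.03 * x_grid + np.random.normal(0.0, 0.1, x_grid.shape)  # ramped toy phase
phase[::7, ::11] = np.nan                                         # some masked holes

d = phase.reshape(-1)
mx = x_grid.reshape(-1).astype(float)
ok = np.logical_not(np.isnan(d))

G = np.vstack((np.ones(ok.sum()), mx[ok])).T           # design matrix [1, x]
coeffs, _, _, _ = np.linalg.lstsq(G, d[ok], rcond=None)
ramp_removed = d.copy()
ramp_removed[ok] = d[ok] - (coeffs[0] + coeffs[1] * mx[ok])
ramp_removed = ramp_removed.reshape(phase.shape)
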
Пример #49
0
def make_masked_time_stream(Blocks, ntime=None) :
    """Converts Data Blocks into a single uniformly sampled time stream.
    
    Also produces the mask giving whether elements are valid entries or came
    from a zero pad.  This produces the required inputs for calculating a
    windowed power spectrum.

    Parameters
    ----------
    Blocks : tuple of DataBlock objects.
    ntime : int
        Total number of time bins in output arrays.  If shorter than required
        extra data is truncated.  If longer, extra data is masked.  Default is
        to use exactly the number that fits all the data.

    Returns
    -------
    time_stream : array
        All the data in `Blocks` but concatenated along the time axis and
        padded with zeros such that the time axis is uniformly sampled and
        uninterrupted.
    mask : array same shape as `time_stream`
        1.0 if data in the corresponding `time_stream` element is filled 
        and 0 if the data was missing.  This is like a window where 
        time_stream = mask*real_data.
    dt : float
        The time step of the returned time stream.
    """

    # Shape of all axes except the time axis.
    back_shape = Blocks[0].dims[1:]
    # Get the time sample spacing.
    Blocks[0].calc_time()
    dt = abs(sp.mean(sp.diff(Blocks[0].time)))
    # Find the beginning and the end of the time axis by looping through
    # blocks.
    min_time = float('inf')
    max_time = 0.0
    for Data in Blocks :
        Data.calc_time()
        min_time = min(min_time, min(Data.time))
        max_time = max(max_time, max(Data.time))
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)
                and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,
                                rtol=0.001)) :
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape :
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
    # Calculate the time axis.
    if not ntime :
        time = sp.arange(min_time, max_time + dt, dt)
        ntime = len(time)
    else :
        time = sp.arange(ntime)*dt + min_time
    # Allocate memory for the outputs.
    time_stream = sp.zeros((ntime,) + back_shape, dtype=float)
    mask = sp.zeros((ntime,) + back_shape, dtype=sp.float32)
    # Loop over all times and fill in the arrays.
    for Data in Blocks :
        # Apply an offset to the time in case the start of the Data Block
        # doesn't line up with the time array perfectly.
        offset = time[sp.argmin(abs(time - Data.time[0]))] - Data.time[0]
        for ii in range(Data.dims[0]) :
            ind = sp.argmin(abs(time - (Data.time[ii] + offset)))
            if abs(time[ind] - (Data.time[ii])) < 0.5*dt :
                if sp.any(mask[ind, ...]) :
                    msg = "Overlapping times in Data Blocks."
                    raise ce.DataError(msg)
                time_stream[ind, ...] = Data.data[ii, ...].filled(0.0)
                mask[ind, ...] = sp.logical_not(Data.data.mask[ii, ...])
    return time_stream, mask, dt
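
# Hedged sketch of what the (time_stream, mask, dt) outputs above are used for:
# a windowed power spectrum estimate in which the mask acts as the window, with
# a normalisation that compensates for the zero-padded samples.  The exact
# normalisation convention is an assumption, not taken from the original code.
import numpy as np

def masked_power_spectrum(time_stream, mask, dt):
    n = time_stream.shape[0]
    window_norm = np.maximum(np.sum(mask**2, axis=0), 1.0e-30)  # per-channel window power
    spec = np.abs(np.fft.rfft(time_stream, axis=0))**2 * dt / window_norm
    freqs = np.fft.rfftfreq(n, d=dt)
    return freqs, spec
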
Пример #50
0
def make_masked_time_stream(Blocks, ntime=None, window=None, 
                            return_means=False, subtract_slope=False) :
    """Converts Data Blocks into a single uniformly sampled time stream.
    
    Also produces the mask giving whether elements are valid entries or came
    from a zero pad.  This produces the required inputs for calculating a
    windowed power spectrum.

    Parameters
    ----------
    Blocks : tuple of DataBlock objects.
    ntime : int
        Total number of time bins in output arrays.  If shorter than required
        extra data is truncated.  If longer, extra data is masked.  Default is
        to use exactly the number that fits all the data.  Set to a negative
        factor to zero pad to a power of 2 and by at least that factor.
    window : string or tuple
        Type of window to apply to each DataBlock.  Valid options are the valid
        arguments to scipy.signal.get_window().  By default, don't window.
    return_means : bool
        Whether to return an array of the channel means.
    subtract_slope : bool
        Whether to subtract a linear function of time from each channel.

    Returns
    -------
    time_stream : array
        All the data in `Blocks` but concatenated along the time axis and
        padded with zeros such that the time axis is uniformly sampled and
        uninterrupted.
    mask : array same shape as `time_stream`
        1.0 if data in the corresponding `time_stream` element is filled 
        and 0 if the data was missing.  This is like a window where 
        time_stream = mask*real_data.
    dt : float
        The time step of the returned time stream.
    means : array (optional)
        The mean from each channel.
    """

    # Shape of all axes except the time axis.
    back_shape = Blocks[0].dims[1:]
    # Get the time sample spacing.
    Blocks[0].calc_time()
    dt = abs(sp.mean(sp.diff(Blocks[0].time)))
    # Find the beginning and the end of the time axis by looping through
    # blocks.
    # Also get the time axis and the mask
    # for calculating basis polynomials.
    unmask = sp.zeros((0,) + back_shape, dtype=bool)
    time = sp.zeros((0,), dtype=float)
    start_ind = []
    min_time = float('inf')
    max_time = 0.0
    #mean_time = 0.0
    #n_data_times = 0
    for Data in Blocks :
        Data.calc_time()
        start_ind.append(len(time))
        time = sp.concatenate((time, Data.time))
        this_unmask = sp.logical_not(ma.getmaskarray(Data.data))
        unmask = sp.concatenate((unmask, this_unmask), 0)
        # Often the start or the end of a scan is completely masked.  Make sure
        # we don't start till the first unmasked time and end at the last
        # unmasked time.
        time_unmask = sp.alltrue(ma.getmaskarray(Data.data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        min_time = min(min_time, min(Data.time[time_unmask]))
        max_time = max(max_time, max(Data.time[time_unmask]))
        #mean_time += sp.sum(Data.time[time_unmask])
        #n_data_times += len(Data.time[time_unmask])
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)
                and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,
                                rtol=0.001)) :
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape :
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
    # Now calculate basis polynomials for the mean mode and the slope mode.
    polys = misc.ortho_poly(time[:,None,None,None], 2, unmask, 0)
    #mean_time /= n_data_times
    #if n_data_times == 0:
    #    n_data_times = 1
    # Very important to subtract the mean out of the signal, otherwise the
    # window coupling to the mean (0) mode will dominate everything. Can also
    # optionally take out a slope.
    # Old algorithm.
    #total_sum = 0.0
    #total_counts = 0
    #total_slope = 0.0
    #time_norm = 0.0
    #for Data in Blocks:
    #    total_sum += sp.sum(Data.data.filled(0), 0)
    #    total_counts += ma.count(Data.data, 0)
    #    total_slope += sp.sum(Data.data.filled(0) 
    #                          * (Data.time[:,None,None,None] - mean_time), 0)
    #    time_norm += sp.sum(sp.logical_not(ma.getmaskarray(Data.data))
    #                        * (Data.time[:,None,None,None] - mean_time)**2, 0)
    #total_counts[total_counts == 0] = 1
    #time_norm[time_norm == 0.0] = 1
    #total_mean = total_sum / total_counts
    #total_slope /= time_norm
    # New algorithm.
    mean_amp = 0
    slope_amp = 0
    for ii, Data in enumerate(Blocks):
        si = start_ind[ii]
        this_nt = Data.dims[0]
        data = Data.data.filled(0)
        mean_amp += sp.sum(data * unmask[si:si + this_nt,...]
                           * polys[0,si:si + this_nt,...], 0)
        slope_amp += sp.sum(data * unmask[si:si + this_nt,...]
                            * polys[1,si:si + this_nt,...], 0)
    polys[0,...] *= mean_amp
    polys[1,...] *= slope_amp
    # Calculate the time axis.
    if min_time > max_time:
        min_time = 0
        max_time = 6 * dt
    if not ntime :
        ntime = (max_time - min_time) // dt + 1
    elif ntime < 0:
        # 0 pad by a factor of at least -ntime, but at most 10% more than this.
        time_min = -ntime * (max_time - min_time) / dt
        n_block = 1
        while n_block < time_min/20.0:
            n_block *= 2
        ntime = (time_min//n_block  + 1) * n_block

    time = sp.arange(ntime)*dt + min_time
    # Allocate memory for the outputs.
    time_stream = sp.zeros((ntime,) + back_shape, dtype=float)
    mask = sp.zeros((ntime,) + back_shape, dtype=sp.float32)
    # Loop over all times and fill in the arrays.
    for ii, Data in enumerate(Blocks):
        this_nt = Data.dims[0]
        si = start_ind[ii]
        # Subtract the mean calculated above.
        this_data = Data.data.copy()
        this_data -= polys[0,si:si + this_nt,...]
        # If desired, subtract off the linear function of time.
        if subtract_slope:
            #this_data -= (total_slope 
            #              * (Data.time[:,None,None,None] - mean_time))
            this_data -= polys[1,si:si + this_nt,...]
        # Find the first and last unmasked times.
        time_unmask = sp.alltrue(ma.getmaskarray(this_data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        unmasked_ind, = sp.where(time_unmask)
        first_ind = min(unmasked_ind)
        last_ind = max(unmasked_ind)
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)
                and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,
                                rtol=0.001)) :
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape :
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
        # Apply an offset to the time in case the start of the Data Block
        # doesn't line up with the time array perfectly.
        offset = (time[sp.argmin(abs(time - Data.time[first_ind]))]
                  - Data.time[first_ind])
        # Generate window function.
        if window:
            window_function = sig.get_window(window, last_ind - first_ind + 1)
        for ii in range(first_ind, last_ind + 1) :
            ind = sp.argmin(abs(time - (Data.time[ii] + offset)))
            if abs(time[ind] - (Data.time[ii])) < 0.5*dt :
                if sp.any(mask[ind, ...]) :
                    msg = "Overlapping times in Data Blocks."
                    raise ce.DataError(msg)
                if window:
                    window_value = window_function[ii - first_ind]
                else :
                    window_value = 1.0
                time_stream[ind, ...] = (window_value 
                                         * this_data[ii, ...].filled(0.0))
                mask[ind, ...] = window_value * sp.logical_not(ma.getmaskarray(
                                     this_data)[ii, ...])
    if return_means:
        return time_stream, mask, dt, polys[0,0,...]
    else :
        return time_stream, mask, dt
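
# Hedged 1-D sketch of the mean/slope removal performed above, with a small
# Gram-Schmidt construction standing in for misc.ortho_poly (unmasked case only).
import numpy as np

def ortho_poly_sketch(t, n_modes):
    """Orthonormalised powers of t: constant mode, slope mode, ..."""
    polys = np.empty((n_modes, len(t)))
    for n in range(n_modes):
        p = np.asarray(t, dtype=float)**n
        for m in range(n):
            p = p - np.dot(p, polys[m]) * polys[m]
        polys[n] = p / np.sqrt(np.dot(p, p))
    return polys

t = np.linspace(0.0, 10.0, 200)
signal = 3.0 + 0.5 * t + np.sin(2 * np.pi * t)
polys = ortho_poly_sketch(t, 2)
amps = polys @ signal                 # amplitudes of the mean and slope modes
detrended = signal - amps @ polys     # signal with mean and slope subtracted
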
Пример #51
0
def flag(Data, NoiseData, thres=3.0, max_noise_factor=-1, modes_subtract=1,
         filter_type='edge'):
    """Flags data for outliers using a signal subtracted data set.
    
    Flags outliers in time stream data by looking at a version of the data
    that has had the signal subtracted out of it.  Each frequency channel,
    polarization and cal state are treated separately.

    Parameters
    ----------
    Data : DataBlock Object
        Data to be flagged.  Upon exit, this object will have new flags.
    NoiseData : DataBlock Object
        Version of `Data` with the signal subtracted.
    thres : float
        Threshold for flagging in units of sigma (default is 3.0).
    modes_subtract : int
        How many modes to remove for high pass filtering.
    filter_type : {'edge', 'gaussian', 'gaussian/edge'}
        Type of high pass filtering to use.
    """
    
    # Get the mask and the data as normal arrays.
    # Copy seems to be necessary if the mask is None.
    data = NoiseData.data.filled(0).copy()
    mask = ma.getmaskarray(NoiseData.data)
    ## High pass filter the data to make outliers stand out.
    un_mask = sp.logical_not(mask)
    NoiseData.calc_time()
    time = NoiseData.time
    n_time = len(time)
    # How many basis polynomials we need and with what fraction of each mode
    # gets subtracted out.
    if filter_type == 'edge':
        n_polys = modes_subtract
        subtract_weights = sp.ones(n_polys)
    elif filter_type == 'gaussian' or filter_type == 'gaussian/edge':
        n_polys = 4 * modes_subtract
        subtract_weights = sp.exp(-(sp.arange(n_polys, dtype=float)
                                     / modes_subtract)**2 / 2.)
        if filter_type == 'gaussian/edge':
            subtract_weights[0:2] = 1.
    # Test if the mask is the same for all slices.  If it is, that greatly
    # reduces the work as we only have to generate one set of polynomials.
    all_masks_same = True
    for jj in range(n_time):
        if sp.all(un_mask[jj,...] == un_mask[jj,0,0,0]):
            continue
        else:
            all_masks_same = False
            break
    if all_masks_same:
        polys = misc.ortho_poly(time, n_polys, un_mask[:,0,0,0], 0)
        polys.shape = (n_polys, len(time), 1, 1, 1)
    else:
        polys = misc.ortho_poly(time[:,None,None,None], n_polys, un_mask, 0)
    # Project the weighted low-order polynomial modes out of the NoiseData.
    amps = sp.sum(data * un_mask * polys, 1)
    amps *= subtract_weights[:,None,None,None]
    data -= sp.sum(amps[:,None,:,:,:] * un_mask[None,:,:,:,:] * polys, 0)
    ## Do the main outlier flagging.
    # Iteratively flag on sliding scale to get closer and closer to desired
    # threshold.
    max_thres = sp.sqrt(n_time)/2.
    n_iter = 3
    thresholds = (max_thres ** (n_iter - 1 - sp.arange(n_iter))
                 * thres ** sp.arange(n_iter)) ** (1./(n_iter - 1))
    for threshold in thresholds:
        # Subtract the mean from every channel.
        this_data = masked_subtract_mean(data, mask, 0)
        # Calculate the variance.
        un_mask = sp.logical_not(mask)
        counts = sp.sum(un_mask, 0)
        counts[counts == 0] = 1
        std = sp.sqrt(sp.sum(this_data**2 * un_mask, 0) / counts)
        bad_inds = abs(this_data) > threshold * std
        # If any polarization or cal state is masked, they all should be.
        bad_inds = sp.any(sp.any(bad_inds, 1), 1)
        mask[bad_inds[:,None,None,:]] = True
    ## Now look for times with an excursion in the frequency average
    # (achromatic outliers).
    # Compute the frequency mean.
    un_mask = sp.logical_not(mask)
    counts = sp.sum(un_mask, -1)
    fmean_un_mask = counts >= 1
    counts[counts == 0] = 1
    fmean = sp.sum(data * un_mask, -1) / counts
    # Subtract the time mean.
    fmean = masked_subtract_mean(fmean, sp.logical_not(fmean_un_mask), 0)
    # Get the variance.
    counts = sp.sum(fmean_un_mask, 0)
    counts[counts == 0] = 1
    fmean_std = sp.sqrt(sp.sum(fmean**2 * fmean_un_mask, 0) / counts)
    # Flag any time that is an outlier (for any polarization or cal state).
    bad_times = sp.any(sp.any(abs(fmean) > thres * fmean_std, 1), 1)
    mask[bad_times,:,:,:] = True
    ## Flag for very noisy channels.
    if max_noise_factor > 0:
        # Do this a few times to make sure we get everything.
        for ii in range(3):
            this_data = masked_subtract_mean(data, mask, 0)
            # Compute variance accounting for the mask.
            un_mask = sp.logical_not(mask)
            counts = sp.sum(un_mask, 0)
            vars_un_mask = counts >= 1
            counts[counts == 0] = 1
            vars = sp.sum(this_data**2 * un_mask, 0) / counts
            # Find the mean of the variances.
            counts = sp.sum(vars_un_mask, -1)
            counts[counts == 0] = 1
            mean_vars = sp.sum(vars * vars_un_mask, -1) / counts
            # Find channels that stand out (for any polarization or cal state).
            bad_chans = sp.any(sp.any(vars > max_noise_factor *
                                      mean_vars[:,:,None], 0), 0)
            mask[:,:,:,bad_chans] = True
    ## Transfer the mask to the DataBlock objects.
    Data.data[mask] = ma.masked
    NoiseData.data[mask] = ma.masked
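
# Hedged 1-D sketch of the sliding-threshold clipping used in flag() above: the
# clip level walks down from a very loose value to the requested one so the
# variance estimate is not dominated by the strongest outliers.
import numpy as np
import numpy.ma as ma

def iterative_clip(x, thres=3.0, n_iter=3):
    x = ma.masked_invalid(np.asarray(x, dtype=float))
    max_thres = np.sqrt(len(x)) / 2.0
    thresholds = (max_thres**(n_iter - 1 - np.arange(n_iter))
                  * thres**np.arange(n_iter))**(1.0 / (n_iter - 1))
    for t in thresholds:
        centred = x - x.mean()
        bad = (np.abs(centred) > t * centred.std()).filled(False)
        x[bad] = ma.masked
    return x
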
Пример #52
0
def flag(Data,
         NoiseData,
         thres=3.0,
         max_noise_factor=-1,
         modes_subtract=1,
         filter_type='edge'):
    """Flags data for outliers using a signal subtracted data set.
    
    Flags outliers in time stream data by looking at a version of the data
    that has had the signal subtracted out of it.  Each frequency channel,
    polarization and cal state are treated separately.

    Parameters
    ----------
    Data : DataBlock Object
        Data to be flagged.  Upon exit, this object will have new flags.
    NoiseData : DataBlock Object
        Version of `Data` with the signal subtracted.
    thres : float
        Threshold for flagging in units of sigma (default is 3.0).
    modes_subtract : int
        How many modes to remove for high pass filtering.
    filter_type : {'edge', 'gaussian', 'gaussian/edge'}
        Type of high pass filtering to use.
    """

    # Get the mask and the data as normal arrays.
    # Copy seems to be necessary if the mask is None.
    data = NoiseData.data.filled(0).copy()
    mask = ma.getmaskarray(NoiseData.data)
    ## High pass filter the data to make outliers stand out.
    un_mask = sp.logical_not(mask)
    NoiseData.calc_time()
    time = NoiseData.time
    n_time = len(time)
    # How many basis polynomials we need and with what fraction of each mode
    # gets subtracted out.
    if filter_type == 'edge':
        n_polys = modes_subtract
        subtract_weights = sp.ones(n_polys)
    elif filter_type == 'gaussian' or filter_type == 'gaussian/edge':
        n_polys = 4 * modes_subtract
        subtract_weights = sp.exp(
            -(sp.arange(n_polys, dtype=float) / modes_subtract)**2 / 2.)
        if filter_type == 'gaussian/edge':
            subtract_weights[0:2] = 1.
    # Test if the mask is the same for all slices.  If it is, that greatly
    # reduces the work as we only have to generate one set of polynomials.
    all_masks_same = True
    for jj in range(n_time):
        if sp.all(un_mask[jj, ...] == un_mask[jj, 0, 0, 0]):
            continue
        else:
            all_masks_same = False
            break
    if all_masks_same:
        polys = misc.ortho_poly(time, n_polys, un_mask[:, 0, 0, 0], 0)
        polys.shape = (n_polys, len(time), 1, 1, 1)
    else:
        polys = misc.ortho_poly(time[:, None, None, None], n_polys, un_mask, 0)
    # Project the weighted low-order polynomial modes out of the NoiseData.
    amps = sp.sum(data * un_mask * polys, 1)
    amps *= subtract_weights[:, None, None, None]
    data -= sp.sum(amps[:, None, :, :, :] * un_mask[None, :, :, :, :] * polys,
                   0)
    ## Do the main outlier flagging.
    # Iteratively flag on sliding scale to get closer and closer to desired
    # threshold.
    max_thres = sp.sqrt(n_time) / 2.
    n_iter = 3
    thresholds = (max_thres**(n_iter - 1 - sp.arange(n_iter)) *
                  thres**sp.arange(n_iter))**(1. / (n_iter - 1))
    for threshold in thresholds:
        # Subtract the mean from every channel.
        this_data = masked_subtract_mean(data, mask, 0)
        # Calculate the variance.
        un_mask = sp.logical_not(mask)
        counts = sp.sum(un_mask, 0)
        counts[counts == 0] = 1
        std = sp.sqrt(sp.sum(this_data**2 * un_mask, 0) / counts)
        bad_inds = abs(this_data) > threshold * std
        # If any polarization or cal state is masked, they all should be.
        bad_inds = sp.any(sp.any(bad_inds, 1), 1)
        mask[bad_inds[:, None, None, :]] = True
    ## Now look for times with an excursion in the frequency average
    # (achromatic outliers).
    # Compute the frequency mean.
    un_mask = sp.logical_not(mask)
    counts = sp.sum(un_mask, -1)
    fmean_un_mask = counts >= 1
    counts[counts == 0] = 1
    fmean = sp.sum(data * un_mask, -1) / counts
    # Subtract the time mean.
    fmean = masked_subtract_mean(fmean, sp.logical_not(fmean_un_mask), 0)
    # Get the variance.
    counts = sp.sum(fmean_un_mask, 0)
    counts[counts == 0] = 1
    fmean_std = sp.sqrt(sp.sum(fmean**2 * fmean_un_mask, 0) / counts)
    # Flag any time that is an outlier (for any polarization or cal state).
    bad_times = sp.any(sp.any(abs(fmean) > thres * fmean_std, 1), 1)
    mask[bad_times, :, :, :] = True
    ## Flag for very noisy channels.
    if max_noise_factor > 0:
        # Do this a few times to make sure we get everything.
        for ii in range(3):
            this_data = masked_subtract_mean(data, mask, 0)
            # Compute variance accounting for the mask.
            un_mask = sp.logical_not(mask)
            counts = sp.sum(un_mask, 0)
            vars_un_mask = counts >= 1
            counts[counts == 0] = 1
            vars = sp.sum(this_data**2 * un_mask, 0) / counts
            # Find the mean of the variances.
            counts = sp.sum(vars_un_mask, -1)
            counts[counts == 0] = 1
            mean_vars = sp.sum(vars * vars_un_mask, -1) / counts
            # Find channels that stand out (for any polarization or cal state).
            bad_chans = sp.any(
                sp.any(vars > max_noise_factor * mean_vars[:, :, None], 0), 0)
            mask[:, :, :, bad_chans] = True
    ## Transfer the mask to the DataBlock objects.
    Data.data[mask] = ma.masked
    NoiseData.data[mask] = ma.masked
Пример #53
0
def flowisentropic(**flow):
    """
    Evaluate the isentropic relations with any flow variable.
    
    This function accepts a given set of specific heat ratios and
    a single input of isentropic flow variables. Inputs can be a single
    scalar or an array_like data structure.

    Parameters
    ----------
    gamma : array_like, optional
        Specific heat ratio. Values must be greater than 1.
    M : array_like
        Mach number. Values must be greater than or equal to 0.
    T : array_like
        Temperature ratio T/T0. Values must be 0 <= T <= 1. 
    P : array_like
        Pressure ratio P/P0. Values must be 0 <= P <= 1.
    rho : array_like
        Density ratio rho/rho0. Values must be 0 <= rho <= 1.
    sub : array_like
        Subsonic area ratio A/A*. Values must be greater than or equal
        to 1.
    sup : array_like
        Supersonic area ratio A/A*. Values must be greater than or
        equal to 1.
    
    Returns
    -------
    out : (M, T, P, rho, area)
        Tuple of Mach number, temperature ratio, pressure ratio, density
        ratio and area ratio.
        
    Notes
    -----
    This function accepts one and only one of the isentropic flow
    variables. It will raise an Exception when more than one input
    is given.
    
    Examples
    --------
    >>> flowisentropic(M=3)
    (3.0, 0.35714285714285715, 0.027223683703862824, 0.076226314370815895,
    4.2345679012345689)
    >>> flowisentropic(gamma=1.4, sup=1.6)
    (1.9352576078182122, 0.57174077399894296, 0.14131786852470815,
    0.24717122680666009, 1.6000000000000001)
    >>> flowisentropic(T=sp.linspace(0, 1, 100))
    (array, array, array, array, array)
    """

    #parse the input
    gamma, flow, mtype, itype = _flowinput(flow)

    #calculate gamma-ratios for use in the equations
    a = (gamma+1) / 2
    b = (gamma-1) / 2
    c = a / (gamma-1)

    #preshape mach array
    M = sp.empty(flow.shape, sp.float64)

    #use the isentropic relations to solve for the mach number
    if mtype in ["mach", "m"]:
        if (flow < 0).any() or not sp.isreal(flow).all():
            raise Exception("Mach number inputs must be real numbers" \
                " greater than or equal to 0.")
        M = flow
    elif mtype in ["temp", "t"]:
        if (flow < 0).any() or (flow > 1).any():
            raise Exception("Temperature ratio inputs must be real numbers" \
                " 0 <= T <= 1.")
        M[flow == 0] = sp.inf
        M[flow != 0] = sp.sqrt((1/b[flow != 0])*(flow[flow != 0]**(-1) - 1))
    elif mtype in ["pres", "p"]:
        if (flow < 0).any() or (flow > 1).any():
            raise Exception("Pressure ratio inputs must be real numbers" \
                " 0 <= P <= 1.")
        M[flow == 0] = sp.inf
        M[flow != 0] = sp.sqrt((1/b[flow != 0]) * \
            (flow[flow != 0]**((gamma[flow != 0]-1)/-gamma[flow != 0]) - 1))
    elif mtype in ["dens", "d", "rho"]:
        if (flow < 0).any() or (flow > 1).any():
            raise Exception("Density ratio inputs must be real numbers" \
                " 0 <= rho <= 1.")
        M[flow == 0] = sp.inf
        M[flow != 0] = sp.sqrt((1/b[flow != 0]) * \
            (flow[flow != 0]**((gamma[flow != 0]-1)/-1) - 1))
    elif mtype in ["sub", "sup"]:
        if (flow < 1).any():
            raise Exception("Area ratio inputs must be real numbers greater" \
                " than or equal to 1.")
        M[:] = 0.2 if mtype == "sub" else 1.8
        for _ in xrange(_AETB_iternum):
            K = M ** 2
            f = -flow + a**(-c) * ((1+b*K)**c) / M #mach-area relation
            g = a**(-c) * ((1+b*K)**(c-1)) * (b*(2*c - 1)*K - 1) / K #deriv
            M = M - (f / g) #Newton-Raphson
        M[flow == 1] = 1
        M[sp.isinf(flow)] = sp.inf
    else:
        raise Exception("Keyword input must be an acceptable string to" \
            " select input parameter.")

    d = 1 + b*M**2
    
    T = d**(-1)
    P = d**(-gamma/(gamma-1))
    rho = d**(-1/(gamma-1))

    area = sp.empty(M.shape, sp.float64)
    r = sp.logical_and(M != 0, sp.isfinite(M))
    area[r] = a[r]**(-c[r]) * d[r]**c[r] / M[r]
    area[sp.logical_not(r)] = sp.inf

    return from_ndarray(itype, M, T, P, rho, area)
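
# Hedged numerical check of the isentropic relations evaluated above, written
# directly from the same formulas for gamma = 1.4 and M = 3; the values should
# match the first docstring example.
gamma, M = 1.4, 3.0
a = (gamma + 1) / 2
b = (gamma - 1) / 2
c = a / (gamma - 1)
d = 1 + b * M**2

T = d**(-1)                       # T/T0      -> 0.357142857...
P = d**(-gamma / (gamma - 1))     # P/P0      -> 0.027223683...
rho = d**(-1 / (gamma - 1))       # rho/rho0  -> 0.076226314...
area = a**(-c) * d**c / M         # A/A*      -> 4.234567901...
print(T, P, rho, area)
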
Пример #54
0
def default_costmatrix(numstates, dtype=float):
    "a square array with zeroes along the diagonal, ones elsewhere"
    return scipy.logical_not(scipy.identity(numstates)).astype(dtype)
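
# Hedged usage note for default_costmatrix above.  Newer scipy releases have
# removed the numpy aliases (scipy.logical_not, scipy.identity), so the same
# 0/1 misclassification cost matrix can be built with numpy directly:
import numpy as np

print(np.logical_not(np.identity(3)).astype(float))
# [[0. 1. 1.]
#  [1. 0. 1.]
#  [1. 1. 0.]]
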
Пример #55
0
 def execute(self, nprocesses=1) :
     """Worker funciton."""
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params, params['output_root'] + 'params.ini',
                            prefix=prefix)
     save_noise_diag = params['save_noise_diag']
     in_root = params['input_root']
     all_out_fname_list = []
     all_in_fname_list = []
     # Figure out what the band names are.
     bands = params['bands']
     if not bands:
         map_files = glob.glob(in_root + 'dirty_map_' + pol_str + "_*.npy")
         bands = []
         root_len = len(in_root + 'dirty_map_')
         for file_name in map_files:
             bands.append(file_name[root_len:-4])
     # Loop over files to process.
     for pol_str in params['polarizations']:
         for band in bands:
             if band == -1:
                 band_str = ''
             else:
                 band_str =  "_" + repr(band)
             dmap_fname = (in_root + 'dirty_map_' + pol_str + 
                           band_str + '.npy')
             all_in_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(dmap_fname))
             # Load the dirty map and the noise matrix.
             dirty_map = algebra.load(dmap_fname)
             dirty_map = algebra.make_vect(dirty_map)
             if dirty_map.axes != ('freq', 'ra', 'dec') :
                 msg = ("Expeced dirty map to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: "
                        + str(dirty_map.axes))
                 raise ce.DataError(msg)
             shape = dirty_map.shape
             # Initialize the clean map.
             clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
             clean_map.info = dict(dirty_map.info)
             clean_map = algebra.make_vect(clean_map)
             # If needed, initialize a map for the noise diagonal.
             if save_noise_diag :
                 noise_diag = algebra.zeros_like(clean_map)
             if params["from_eig"]:
                 # Solving from eigen decomposition of the noise instead of
                 # the noise itself.
                 # Load in the decomposition.
                 evects_fname = (in_root + 'noise_evects_' + pol_str
                                 + band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using eigenvectors: " + evects_fname
                 evects = algebra.open_memmap(evects_fname, 'r')
                 evects = algebra.make_mat(evects)
                 evals_inv_fname = (in_root + 'noise_evalsinv_' + pol_str
                                    + "_" + repr(band) + '.npy')
                 evals_inv = algebra.load(evals_inv_fname)
                 evals_inv = algebra.make_mat(evals_inv)
                 # Solve for the map.
                 if params["save_noise_diag"]:
                     clean_map, noise_diag = solve_from_eig(evals_inv,
                                 evects, dirty_map, True, self.feedback)
                 else:
                     clean_map = solve_from_eig(evals_inv,
                                 evects, dirty_map, False, self.feedback)
                 # Delete the eigen vectors to recover memory.
                 del evects
             else:
                 # Solving from the noise.
                 noise_fname = (in_root + 'noise_inv_' + pol_str +
                                band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using noise inverse: " + noise_fname
                 all_in_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_fname))
                 noise_inv = algebra.open_memmap(noise_fname, 'r')
                 noise_inv = algebra.make_mat(noise_inv)
                 # Two cases for the noise.  If its the same shape as the map
                 # then the noise is diagonal.  Otherwise, it should be
                 # block diagonal in frequency.
                 if noise_inv.ndim == 3 :
                     if noise_inv.axes != ('freq', 'ra', 'dec') :
                         msg = ("Expeced noise matrix to have axes "
                                 "('freq', 'ra', 'dec'), but it has: "
                                 + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Noise inverse can fit in memory, so copy it.
                     noise_inv_memory = sp.array(noise_inv, copy=True)
                     # Find the non-singular (covered) pixels.
                     max_information = noise_inv_memory.max()
                     good_data = noise_inv_memory > 1.0e-10*max_information
                     # Make the clean map.
                     clean_map[good_data] = (dirty_map[good_data] 
                                             / noise_inv_memory[good_data])
                     if save_noise_diag :
                         noise_diag[good_data] = \
                                 1/noise_inv_memory[good_data]
                 elif noise_inv.ndim == 5 :
                     if noise_inv.axes != ('freq', 'ra', 'dec', 'ra',
                                           'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                "but it has: " + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Arrange the dirty map as a vector.
                     dirty_map_vect = sp.array(dirty_map) # A view.
                     dirty_map_vect.shape = (shape[0], shape[1]*shape[2])
                     frequencies = dirty_map.get_axis('freq')/1.0e6
                     # Allocate memory only once.
                     noise_inv_freq = sp.empty((shape[1], shape[2], 
                                     shape[1], shape[2]), dtype=float)
                     if self.feedback > 1 :
                         print "Inverting noise matrix."
                     # Block diagonal in frequency so loop over frequencies.
                     for ii in xrange(dirty_map.shape[0]) :
                         if self.feedback > 1:
                             print "Frequency: ", "%5.1f"%(frequencies[ii]),
                         if self.feedback > 2:
                             print ", start mmap read:",
                             sys.stdout.flush()
                         noise_inv_freq[...] = noise_inv[ii, ...]
                         if self.feedback > 2:
                             print "done, start eig:",
                             sys.stdout.flush()
                         noise_inv_freq.shape = (shape[1]*shape[2],
                                                 shape[1]*shape[2])
                         # Solve the map making equation by diagonalization.
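                          # Writing noise_inv = R diag(lambda) R^T, the
                          # solution is map = R diag(1/lambda) R^T dirty_map;
                          # modes with lambda ~ 0 are discarded below.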
                         noise_inv_diag, Rot = sp.linalg.eigh(
                             noise_inv_freq, overwrite_a=True)
                         if self.feedback > 2:
                             print "done",
                         map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                         # Zero out infinite noise modes.
                         bad_modes = (noise_inv_diag
                                      < 1.0e-5 * noise_inv_diag.max())
                         if self.feedback > 1:
                             print ", discarded: ",
                             print "%4.1f" % (100.0 * sp.sum(bad_modes) 
                                              / bad_modes.size),
                             print "% of modes",
                         if self.feedback > 2:
                             print ", start rotations:",
                             sys.stdout.flush()
                         map_rotated[bad_modes] = 0.
                         noise_inv_diag[bad_modes] = 1.0
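                          # The 1.0 is an arbitrary placeholder that avoids a
                          # divide by zero; the corresponding entries of
                          # map_rotated have already been zeroed.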
                         # Solve for the clean map and rotate back.
                         map_rotated /= noise_inv_diag
                         map = sp.dot(Rot, map_rotated)
                         if self.feedback > 2:
                             print "done",
                             sys.stdout.flush()
                         # Fill the clean array.
                         map.shape = (shape[1], shape[2])
                         clean_map[ii, ...] = map
                         if save_noise_diag :
                             # Using C = R Lambda R^T 
                             # where Lambda = diag(1/noise_inv_diag).
                             temp_noise_diag = 1/noise_inv_diag
                             temp_noise_diag[bad_modes] = 0
                              # Multiply R by the diagonal eigenvalue matrix.
                              # Broadcasting over the last axis is equivalent
                              # to multiplying by the diagonal matrix.
                             temp_mat = Rot*temp_noise_diag
                             # Multiply by R^T, but only calculate the
                             # diagonal elements.
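                              # The jj-th diagonal element is
                              # dot(temp_mat[jj,:], Rot[jj,:]), so the full
                              # pixel-pixel covariance is never formed.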
                             for jj in range(shape[1]*shape[2]) :
                                 temp_noise_diag[jj] = sp.dot(
                                     temp_mat[jj,:], Rot[jj,:])
                             temp_noise_diag.shape = (shape[1], shape[2])
                             noise_diag[ii, ...] = temp_noise_diag
                          # Return workspace memory to original shape.
                         noise_inv_freq.shape = (shape[1], shape[2],
                                                 shape[1], shape[2])
                         if self.feedback > 1:
                             print ""
                             sys.stdout.flush()
                 elif noise_inv.ndim == 6 :
                     if save_noise_diag:
                         # OLD WAY.
                         #clean_map, noise_diag, chol = solve(noise_inv,
                         #        dirty_map, True, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_diag, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   True, feedback=self.feedback)
                     else:
                         # OLD WAY.
                         #clean_map, chol = solve(noise_inv, dirty_map, 
                         #            False, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   False, feedback=self.feedback)
                     if params['save_cholesky']:
                         chol_fname = (params['output_root'] + 'chol_'
                                     + pol_str + band_str + '.npy')
                         sp.save(chol_fname, chol)
                     if params['save_noise_inv_diag']:
                         noise_inv_diag_fname = (params['output_root'] +
                                    'noise_inv_diag_' + pol_str + band_str 
                                    + '.npy')
                         algebra.save(noise_inv_diag_fname, noise_inv_diag)
                     # Delete the cholesky to recover memory.
                     del chol
                 else :
                     raise ce.DataError("Noise matrix has bad shape.")
                  # In all cases delete the noise object to recover memory.
                 del noise_inv
             # Write the clean map to file.
             out_fname = (params['output_root'] + 'clean_map_'
                          + pol_str + band_str + '.npy')
             if self.feedback > 1:
                 print "Writing clean map to: " + out_fname
             algebra.save(out_fname, clean_map)
             all_out_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(out_fname))
             if save_noise_diag :
                 noise_diag_fname = (params['output_root'] + 'noise_diag_'
                                     + pol_str + band_str + '.npy')
                 algebra.save(noise_diag_fname, noise_diag)
                 all_out_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_diag_fname))
              # Check the clean map for failure.
             if not sp.alltrue(sp.isfinite(clean_map)):
                 n_bad = sp.sum(sp.logical_not(sp.isfinite(clean_map)))
                 msg = ("Non finite entries found in clean map. Solve"
                        " failed. %d out of %d entries bad" 
                        % (n_bad, clean_map.size)) 
                 raise RuntimeError(msg)