Example #1
    def copy(self):
        """Returns a deep copy of the Map (data planes and header)."""
        new = Map.empty()

        new.data[0] = ma.copy(self.data[0])
        new.data[1] = ma.copy(self.data[1])
        new.header = copy.deepcopy(self.header)

        return new
Example #2
def interpolate_2d_nearest(x: np.ndarray, y: np.ndarray, z: np.ndarray,
                           x_new: np.ndarray,
                           y_new: np.ndarray) -> ma.MaskedArray:
    """2D nearest neighbor interpolation preserving mask.

    Args:
        x: 1D array, x-coordinates.
        y: 1D array, y-coordinates.
        z: 2D masked array, data values.
        x_new: 1D array, new x-coordinates.
        y_new: 1D array, new y-coordinates.

    Returns:
        Interpolated 2D masked array.

    Notes:
        Points outside the original range will be interpolated but masked.

    """
    data = ma.copy(z)
    fun = RegularGridInterpolator((x, y),
                                  data,
                                  method="nearest",
                                  bounds_error=False,
                                  fill_value=ma.masked)
    xx, yy = np.meshgrid(x_new, y_new)
    return fun((xx, yy)).T
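A hypothetical usage sketch for the function above; the sample grid, the masking threshold and the new coordinates are made up, and the snippet assumes numpy, numpy.ma and scipy.interpolate.RegularGridInterpolator are imported as in the source module:

import numpy as np
import numpy.ma as ma

x = np.linspace(0, 10, 11)
y = np.linspace(0, 5, 6)
z = ma.masked_greater(np.random.rand(11, 6), 0.9)   # (len(x), len(y)) masked data
z_new = interpolate_2d_nearest(x, y, z,
                               x_new=np.linspace(0, 12, 25),
                               y_new=np.linspace(0, 5, 11))
# points with x > 10 lie outside the original grid and come back masked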
Example #3
 def screen_data(
     self,
     data_in: np.ndarray,
     snr_limit: float = 5,
     is_smoothed: bool = False,
     keep_negative: bool = False,
     filter_fog: bool = True,
     filter_negatives: bool = True,
     filter_snr: bool = True,
 ) -> np.ndarray:
     data = ma.copy(data_in)
     self._calc_range_uncorrected(data)
     noise = _estimate_background_noise(data)
     noise = self._adjust_noise(noise, is_smoothed)
     if filter_negatives is True:
         is_negative = self._mask_low_values_above_consequent_negatives(
             data)
         noise[is_negative] = 1e-12
     if filter_fog is True:
         is_fog = self._find_fog_profiles()
         self._clean_fog_profiles(data, is_fog)
         noise[is_fog] = 1e-12
     if filter_snr is True:
         data = self._remove_noise(data, noise, keep_negative, snr_limit)
     self._calc_range_corrected(data)
     return data
Example #4
def array_to_probability(arr_in, loc, scale, invert=False):
    """Converts continuous variable into 0-1 probability.

    Args:
        arr_in (MaskedArray): Masked numpy array.
        loc (float): Center of the distribution. Values smaller than this
            will have small probability. Values greater than this will have
            large probability.
        scale (float): Width of the distribution, i.e., how fast the probability
            drops or increases from the peak.
        invert (bool, optional): If True, large values have small
            probability and vice versa. Default is False.

    Returns:
        ndarray: Probability.

    """
    arr = ma.copy(arr_in)
    prob = np.zeros(arr.shape)
    ind = ~arr.mask
    if invert:
        arr *= -1
        loc *= -1
    prob[ind] = stats.norm.cdf(arr[ind], loc=loc, scale=scale)
    return prob
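A hypothetical call of the function above with made-up numbers; it assumes scipy.stats is imported as stats, as the body implies:

import numpy as np
import numpy.ma as ma

arr = ma.masked_invalid(np.array([0.5, 2.0, np.nan, 8.0]))
prob = array_to_probability(arr, loc=4.0, scale=1.0)
# values well below loc map to probabilities near 0, values well above loc to near 1,
# and the masked element keeps the default probability of 0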
Example #5
def my_write_sdfits(filename, data):
    # the CORE keywords/columns that need to be present
    core = [
        'DATE-OBS', 'TSYS', 'DATA', 'EXPOSURE', 'TELESCOP', 'BANDWID', 'OBJECT'
    ]
    keys = ['FEED']

    dims = data.shape
    dims1 = dims[:-1]  # all dims but nchan
    nchan = dims[-1]
    n1 = dimsize(dims1)  # this is naxis2
    data1 = data.reshape(n1, nchan)

    print('SHAPE=', dims)
    print('NAXIS2=', n1)
    print('NCHAN=', nchan)

    # DATE-OBS
    a1 = np.arange(n1, dtype=np.float64)
    # TSYS
    a2 = np.ones(n1, dtype=float)
    # DATA:  make a copy, and apply the mask
    if data1.count() == dimsize(data1.shape):
        print("All data good, no masking operation needed")
        a3 = data1
    else:
        print("Some data masked, using a copy to write")
        a3 = ma.copy(data1)
        np.putmask(a3, a3.mask, np.nan)
    # FEED
    a4 = np.arange(n1, dtype=int)

    print(a1.shape)
    print(a3.shape)

    col1 = fits.Column(name='DATE-OBS', format='D', array=a1)
    col2 = fits.Column(name='TSYS', format='E', array=a2)
    col3 = fits.Column(name='DATA', format='%dE' % nchan, array=a3)
    col4 = fits.Column(name='FEED', format='I', array=a4)

    cols = fits.ColDefs([col1, col2, col3, col4])
    hdu = fits.BinTableHDU.from_columns(cols)
    # mark it as 'SINGLE DISH'
    hdu.header['EXTNAME'] = 'SINGLE DISH'
    hdu.header['EXTVER'] = 1

    # write the CORE keywords that do not vary
    hdu.header['EXPOSURE'] = 0.1  # sec
    hdu.header['TELESCOP'] = 'LMT/GTM'
    hdu.header['INSTRUME'] = 'lmtoy'
    hdu.header['OBJECT'] = 'NOISE'
    hdu.header['BANDWID'] = 800.0e6  # Hz

    # write some provenance
    hdu.header['ORIGIN'] = 'LMTOY test'
    hdu.header['DATADIMS'] = str(dims)

    # finish up and write the file
    hdu.writeto(filename, overwrite=True)
    print("Written %s" % filename)
Example #6
def array_to_probability(array: np.ndarray,
                         loc: float,
                         scale: float,
                         invert: bool = False) -> np.ndarray:
    """Converts continuous variable into 0-1 probability.

    Args:
        array: Masked numpy array.
        loc: Center of the distribution. Values smaller than this will have small probability.
            Values greater than this will have large probability.
        scale: Width of the distribution, i.e., how fast the probability drops or increases from
            the peak.
        invert: If True, large values have small probability and vice versa. Default is False.

    Returns:
        Probability with the same shape as the input data.

    """
    arr = ma.copy(array)
    prob = np.zeros(arr.shape)
    ind = ~arr.mask
    if invert:
        arr *= -1
        loc *= -1
    prob[ind] = stats.norm.cdf(arr[ind], loc=loc, scale=scale)
    return prob
Example #7
 def _get_liquid_atten(self) -> ma.MaskedArray:
     """Finds radar liquid attenuation."""
     lwp = ma.copy(self._mwr["lwp"][:])
     lwp[lwp < 0] = 0
     lwc = calc_adiabatic_lwc(self._lwc_dz_err, self._dheight)
     lwc_scaled = distribute_lwp_to_liquid_clouds(lwc, lwp)
     return self._calc_attenuation(lwc_scaled)
Example #8
def augment_translate(X, Y_loc, max_translate=(20, 20)):
    """Translate the data and its location label by a random offset (different for each sample).
    Inputs:
        X | r4 masked array | samples x height x width x channels
        Y_loc | r2 array | samples x 4 (columns 0 and 1 are the x and y centres)
        max_translate | tuple | max x translation, max y translation
    Returns:
        X_translate | r4 masked array | translated data
        Y_loc_translate | r2 array | labels with shifted centres
    """
    import numpy as np
    import numpy.ma as ma

    n_pixs = X.shape[1]  # normally 224

    X_translate = ma.copy(X)
    Y_loc_translate = ma.copy(Y_loc)
    x_translations = np.random.randint(
        0, 2 * max_translate[0], X.shape[0]
    )  # translations could be + or - max translation, but everything is positive when indexing arrays so double the max translation
    y_translations = np.random.randint(0, 2 * max_translate[1], X.shape[0])

    Y_loc_translate[:, 0] -= x_translations - max_translate[
        0]  # these are the x centres
    Y_loc_translate[:, 1] -= y_translations - max_translate[
        1]  # these are the y centres

    for n_ifg, ifg in enumerate(
            X
    ):  #loop through each ifg, but ma doesn't have a pad (ie can't pad masked arrays)
        ifg_large_data = np.pad(
            ma.getdata(ifg), ((max_translate[1], max_translate[1]),
                              (max_translate[0], max_translate[0]), (0, 0)),
            mode='edge')  # padding the data  (y then x then channels)
        ifg_large_mask = np.pad(
            ma.getmask(ifg), ((max_translate[1], max_translate[1]),
                              (max_translate[0], max_translate[0]), (0, 0)),
            mode='edge')  # padding the mask (y then x then channels)
        ifg_large = ma.array(
            ifg_large_data, mask=ifg_large_mask
        )  # recombining the padded mask and data to make an enlarged masked array
        ifg_crop = ifg_large[
            y_translations[n_ifg]:y_translations[n_ifg] + n_pixs,
            x_translations[n_ifg]:x_translations[n_ifg] +
            n_pixs, :]  # crop from the large ifg back to the original resolution
        X_translate[
            n_ifg, :, :, :] = ifg_crop  # append result to big rank 4 of ifgs

    return X_translate, Y_loc_translate
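A hypothetical smoke test for augment_translate with tiny random data; the shapes follow the docstring, and an explicit all-False mask is used so that ma.getmask() returns a full boolean array for the np.pad calls to work on:

import numpy as np
import numpy.ma as ma

X = ma.array(np.random.rand(2, 224, 224, 1),
             mask=np.zeros((2, 224, 224, 1), dtype=bool))
Y_loc = ma.array(np.random.rand(2, 4) * 224.0)
X_t, Y_loc_t = augment_translate(X, Y_loc, max_translate=(20, 20))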
Example #9
def _reset_low_values_above_saturation(beta_in, is_saturation,
                                       saturation_noise):
    """Removes low values in saturated profiles above peak."""
    beta = ma.copy(beta_in)
    for saturated_profile in np.where(is_saturation)[0]:
        profile = beta[saturated_profile, :]
        peak_ind = np.argmax(profile)
        alt_ind = np.where(profile[peak_ind:] < saturation_noise)[0] + peak_ind
        beta[saturated_profile, alt_ind] = ma.masked
    return beta
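A hypothetical example of the screening above with made-up backscatter values and noise threshold:

import numpy as np
import numpy.ma as ma

beta = ma.array([[1e-7, 5e-5, 2e-7, 5e-8],
                 [1e-7, 2e-7, 1e-7, 9e-8]])
is_saturation = np.array([True, False])
cleaned = _reset_low_values_above_saturation(beta, is_saturation, saturation_noise=1e-7)
# in the saturated first profile, gates above the peak whose value is below 1e-7 are masked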
Example #10
def get_tagged(wf, tags):
    all_clusters = reduce(set.union, tags[tags != -1].ravel(), set())

    masked_wf = wf.view(ma.MaskedArray)
    masked_wf.fill_value = 0.
    for icluster in all_clusters:
        masked_wf.mask = False  # unmask everything
        masked_wf[~has_tag(tags, icluster)] = ma.masked

        yield ma.copy(masked_wf)
Example #11
def decodeImage(image):
    watermark = copy(image)
    colorsRange = range(watermark.shape[2])
    for currentColor in colorsRange:
        for pixelRowNumber, pixelColumnNumber in createIteratorOverPixels(
                watermark[..., currentColor]):
            currentPixel = watermark[
                ..., currentColor][pixelRowNumber][pixelColumnNumber]
            watermark[..., currentColor][pixelRowNumber][pixelColumnNumber] = (
                currentPixel >> bitShift & 1) * 255

    return watermark
Example #12
def augment_rotate(X, Y_loc):
    """ Rotate data and the label.  Angles are random in range [0 360], and different for each sample.
    Note: Location labels aren't rotated!  Assumed to be roughly square.
    Inputs:
        X | r4 array | samples x height x width x channels
        Y_loc | r2 array | samples X 4
    Returns:
        X_rotate | r4 masked array | rotated data
        Y_loc_rotate | r2 array | labels with rotated centre coordinates
    """
    import numpy as np
    import numpy.ma as ma

    def rot(image, xy, angle):
        """Taken from stack exchange """
        from scipy.ndimage import rotate
        im_rot = rotate(image, angle, reshape=False, mode='nearest')
        org_center = (np.array(image.shape[:2][::-1]) - 1) / 2.
        rot_center = (np.array(im_rot.shape[:2][::-1]) - 1) / 2.
        org = xy - org_center
        a = np.deg2rad(angle)
        new = np.array([
            org[0] * np.cos(a) + org[1] * np.sin(a),
            -org[0] * np.sin(a) + org[1] * np.cos(a)
        ])
        return im_rot, new + rot_center

    X_rotate = ma.copy(X)
    Y_loc_rotate = ma.copy(Y_loc)
    rotate_angles_deg = np.random.randint(0, 360, X.shape[0])

    for n_ifg, ifg in enumerate(X):  #loop through each ifg
        ifg_rot, xy_rot = rot(ifg, Y_loc_rotate[n_ifg, :2],
                              rotate_angles_deg[n_ifg])
        X_rotate[n_ifg, :, :, :] = ifg_rot
        Y_loc_rotate[n_ifg, :2] = xy_rot

    return X_rotate, Y_loc_rotate
Example #13
def mask_after_cross(xsarray):
    xsarray = ma.copy(xsarray)
    marked = np.where(xsarray <= 1.0, xsarray, np.full_like(xsarray, 10.))
    maxes = np.max(marked, axis=2)
    max_idcs = np.argmax(marked, axis=2)
    cross_idcs = np.where(maxes == 10., max_idcs,
                          np.full_like(max_idcs, xsarray.shape[2]))

    # Note: don't know how to do this with NumPy, but it's fast enough anyway.
    for i_round in range(xsarray.shape[0]):
        for i_episode in range(xsarray.shape[1]):
            xsarray[i_round, i_episode, cross_idcs[i_round, i_episode]:] \
                = ma.masked

    return xsarray
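A hypothetical call with made-up data of shape rounds x episodes x steps; values above 1.0 mark the "cross":

import numpy as np
import numpy.ma as ma

xs = ma.array(np.random.rand(2, 3, 5) * 1.2)
out = mask_after_cross(xs)
# within each (round, episode) series, everything from the first value above 1.0 onwards is masked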
Example #14
 def calc_beta_smooth(self,
                      beta: np.ndarray,
                      snr_limit: int = 5,
                      range_corrected: bool = True) -> np.ndarray:
     noisy_data = NoisyData(self.data, self.noise_param, range_corrected)
     beta_raw = ma.copy(self.data["beta_raw"])
     cloud_ind, cloud_values, cloud_limit = _estimate_clouds_from_beta(beta)
     beta_raw[cloud_ind] = cloud_limit
     sigma = calc_sigma_units(self.data["time"], self.data["range"])
     beta_raw_smooth = gaussian_filter(beta_raw, sigma)
     beta_raw_smooth[cloud_ind] = cloud_values
     beta_smooth = noisy_data.screen_data(beta_raw_smooth,
                                          is_smoothed=True,
                                          snr_limit=snr_limit)
     return beta_smooth
Example #15
 def test_by_number_mask(self):
     wbins = 8
     # Set data = f.
     self.Data.data[...] = sp.arange(self.Data.data.shape[-1])
     self.Data.data[3,1,1,54] = ma.masked
     self.Data.data[5,1,1,70:82] = ma.masked
     old_data = ma.copy(self.Data.data)
     delta =  self.Data.field['CDELT1']        
     # Rebin.
     rebin_freq.rebin(self.Data, wbins, mean=True, by_nbins=True)
     self.Data.verify()
     # Except for the last bin, Data should still be freq.
     self.assertAlmostEqual(self.Data.data[3,1,1,6], 
                            ma.mean(old_data[3,1,1,48:56]))
     self.assertTrue(self.Data.data[5,1,1,9] is ma.masked)
Example #16
def find_freezing_region(obs: ClassData,
                         melting_layer: np.ndarray) -> np.ndarray:
    """Finds freezing region using the model temperature and melting layer.

    Every profile that contains melting layer, subzero region starts from
    the mean melting layer height. If there are (long) time windows where
    no melting layer is present, model temperature is used in the
    middle of the time window. Finally, the subzero altitudes are linearly
    interpolated for all profiles.

    Args:
        obs: The :class:`ClassData` instance.
        melting_layer: 2-D boolean array denoting melting layer.

    Returns:
        2-D boolean array denoting the sub-zero region.

    Notes:
        It is not clear how model temperature and melting layer should be
        ideally combined to determine the sub-zero region. This current
        method differs slightly from the original Matlab code and should
        be validated more carefully later.

    """
    is_freezing = np.zeros(obs.tw.shape, dtype=bool)
    t0_alt = _find_t0_alt(obs.tw, obs.height)
    mean_melting_alt = _find_mean_melting_alt(obs, melting_layer)

    if _is_all_freezing(mean_melting_alt, t0_alt, obs.height):
        logging.info(
            "All temperatures below freezing and no detected melting layer")
        return np.ones(obs.tw.shape, dtype=bool)

    freezing_alt = ma.copy(mean_melting_alt)

    for ind in (0, -1):
        freezing_alt[ind] = mean_melting_alt[ind] or t0_alt[ind]
    win = utils.n_elements(obs.time, 240, "time")  # 4h window
    mid_win = int(win / 2)
    for n in range(len(obs.time) - win):
        if mean_melting_alt[n:n + win].mask.all():
            freezing_alt[n + mid_win] = t0_alt[n + mid_win]
    ind = ~freezing_alt.mask
    f = interp1d(obs.time[ind], freezing_alt[ind])
    freezing_alt_interpolated = f(obs.time) - 1
    for ii, alt in enumerate(freezing_alt_interpolated):
        is_freezing[ii, obs.height > alt] = True
    return is_freezing
Example #17
 def filter_stripes(self, variable: str) -> None:
     """Filters vertical and horizontal stripe-shaped artifacts from radar data."""
     if variable not in self.data:
         return
     data = ma.copy(self.data[variable][:])
     n_points_in_profiles = ma.count(data, axis=1)
     n_profiles_with_data = np.count_nonzero(n_points_in_profiles)
     if n_profiles_with_data < 300:
         return
     n_vertical = self._filter(data, 1, min_coverage=0.5, z_limit=10, distance=4, n_blocks=100)
     n_horizontal = self._filter(data, 0, min_coverage=0.3, z_limit=-30, distance=3, n_blocks=20)
     if n_vertical > 0 or n_horizontal > 0:
         logging.info(
             f"Filtered {n_vertical} vertical and {n_horizontal} horizontal stripes "
             f"from radar data using {variable}"
         )
Example #18
    def _screen_by_snr(self, beta_uncorrected, is_saturation, smooth=False):
        """Screens noise from ceilometer backscatter.

        Args:
            beta_uncorrected (ndarray): Range-uncorrected backscatter.
            is_saturation (ndarray): Boolean array denoting saturated profiles.
            smooth (bool, optional): Should be true if input beta is smoothed.
                Default is False.

        """
        beta = ma.copy(beta_uncorrected)
        n_gates, _, saturation_noise, noise_min = self.noise_params
        noise_min = noise_min[0] if smooth else noise_min[1]
        noise = _estimate_noise_from_top_gates(beta, n_gates, noise_min)
        beta = _reset_low_values_above_saturation(beta, is_saturation,
                                                  saturation_noise)
        beta = _remove_noise(beta, noise)
        return beta
Example #19
def _find_drizzle_and_falling(is_liquid: np.ndarray, is_falling: np.ndarray,
                              is_freezing: np.ndarray) -> np.ndarray:
    """Classifies pixels as falling, drizzle and others.

    Args:
        is_liquid: 2D boolean array denoting liquid layers.
        is_falling: 2D boolean array denoting falling pixels.
        is_freezing: 2D boolean array denoting subzero temperatures.

    Returns:
        2D array where values are 1 (falling), 2 (drizzle), and masked (all others).

    """
    falling_dry = is_falling & ~is_liquid
    drizzle = falling_dry & ~is_freezing
    drizzle_and_falling = falling_dry.astype(int) + drizzle.astype(int)
    drizzle_and_falling = ma.copy(drizzle_and_falling)
    drizzle_and_falling[drizzle_and_falling == 0] = ma.masked
    return drizzle_and_falling
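A small hypothetical example of the classification above:

import numpy as np

is_liquid = np.array([[False, True], [False, False]])
is_falling = np.array([[True, True], [True, False]])
is_freezing = np.array([[True, False], [False, False]])
result = _find_drizzle_and_falling(is_liquid, is_falling, is_freezing)
# result is [[1, --], [2, --]]: falling (freezing) in the first pixel, drizzle in the lower-left,
# and everything else masked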
Example #20
    def calc_beta(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Converts range-corrected raw beta to noise-screened beta."""

        def _screen_beta(beta_in: np.ndarray, smooth: bool) -> np.ndarray:
            beta_in = _calc_range_uncorrected_beta(beta_in, range_squared)
            beta_in = self._screen_by_snr(beta_in, is_saturation, beta_is_smoothed=smooth)
            return _calc_range_corrected_beta(beta_in, range_squared)

        range_squared = _get_range_squared(self.range)
        is_saturation = self._find_saturated_profiles()
        beta = _screen_beta(self.backscatter, False)
        # smoothed version:
        beta_smooth = ma.copy(self.backscatter)
        cloud_ind, cloud_values, cloud_limit = _estimate_clouds_from_beta(beta)
        beta_smooth[cloud_ind] = cloud_limit
        sigma = _calc_sigma_units(self.time, self.range)
        beta_smooth = scipy.ndimage.filters.gaussian_filter(beta_smooth, sigma)
        beta_smooth[cloud_ind] = cloud_values
        beta_smooth = _screen_beta(beta_smooth, True)
        return self.backscatter, beta, beta_smooth
Example #21
def l2norm(*args) -> ma.MaskedArray:
    """Returns l2 norm.

    Args:
       *args: Variable number of data (*array_like*) with the same shape.

    Returns:
        The l2 norm.

    """
    ss = 0
    for arg in args:
        if isinstance(arg, ma.MaskedArray):
            # Square only the non-masked values; not sure if this is needed...
            arg = ma.copy(arg)
            arg[~arg.mask] = arg[~arg.mask]**2
        else:
            arg = arg**2
        ss = ss + arg
    return ma.sqrt(ss)
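A hypothetical call of l2norm showing that masked elements stay masked:

import numpy as np
import numpy.ma as ma

u = ma.masked_invalid(np.array([3.0, np.nan, 1.0]))
v = np.array([4.0, 2.0, 2.0])
print(l2norm(u, v))  # [5.0 -- 2.236...]; the second element stays masked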
Example #22
def _add_axes_back(arr, axis):
    '''
    Add axes back in again after they've been processed out.
    
    This is not a great implementation. I'm sure it can be improved upon. The
    only reason this function exists at all is because the numpy.ma math
    functions don't consistently take the keepdims option.
    '''
    # Here is the scenario. We have some array whose shape was originally
    # something like (a,b,c,d). We processed out the second and fourth axes
    # by summing over them (axis=(1,3)). At this point, we have an array whose
    # shape is now (a,c). Unfortunately, this array shape does not work well
    # for broadcasting against our original data. What we need is an array
    # with shape (a,1,c,1).
    #
    # So, this function needs to call ma.expand_dims once for each axis that
    # was removed.

    # make sure axis is iterable
    try:
        iter(axis)
    except TypeError:
        # singleton. Make iterable.
        axis = (axis, )

    if arr is ma.masked:
        # For reasons unknown, ma.expand_dims does nothing for the ma.masked.
        # Other singletons work fine. No idea why. I've submitted a bug report:
        # https://github.com/numpy/numpy/issues/7424
        new_shape = np.ones(len(axis), dtype=int)
        new_arr = np.zeros(new_shape)
        new_arr = ma.masked_array(data=new_arr, mask=True)
    else:
        # Otherwise, systematically add back the dimensions removed.
        new_arr = ma.copy(arr)
        for i in np.sort(axis):
            new_arr = ma.expand_dims(new_arr, i)

    return new_arr
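A hypothetical use of the helper above, reinserting the two axes that were summed out so the result broadcasts against the original array:

import numpy as np
import numpy.ma as ma

arr = ma.array(np.random.rand(3, 4, 5, 6))
summed = ma.sum(arr, axis=(1, 3))        # shape (3, 5); keepdims is what we are emulating
restored = _add_axes_back(summed, (1, 3))
print(restored.shape)                    # (3, 1, 5, 1), broadcastable against arr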
Example #23
    def FillNa(self, votes_na, ScaledIndex):
        """Uses exisiting data and reputations to fill missing observations.
        Essentially a weighted average using all availiable non-NA data.
        How much should slackers who arent voting suffer? I decided this would
        depend on the global percentage of slacking.
        """
        # In case no Missing values, Mnew and votes_na will be the same.
        votes_na_new = ma.copy(votes_na)

        # Of course, only do this process if there ARE missing values.
        if votes_na.mask.any():

            # Our best guess for the Decision state (FALSE=0, Ambiguous=.5, TRUE=1)
            # so far (ie, using the present, non-missing, values).
            DecisionOutcomes_Raw = self.GetDecisionOutcomes(votes_na, ScaledIndex).squeeze()

            # Fill in the predictions to the original M
            NAmat = votes_na.mask  # Defines the slice of the matrix which needs to be edited.
            votes_na_new[NAmat] = 0  # Erase the NA's

            # Slightly complicated:
            NAsToFill = np.dot(NAmat, np.diag(DecisionOutcomes_Raw))

            # This builds a matrix whose columns j:
            #   NAmat was false (the observation wasn't missing) - have a value of Zero
            #   NAmat was true (the observation was missing)     - have a value of the jth element of DecisionOutcomes.Raw (the 'current best guess')
            votes_na_new += NAsToFill
            # This replaces the NAs, which were zeros, with the predicted Decision outcome.

            # Appropriately force the predictions into their discrete
            # (0,.5,1) slot. (continuous variables can be gamed).
            rows, cols = votes_na_new.shape
            for i in range(rows):
                for j in range(cols):
                    if not ScaledIndex[j]:
                        votes_na_new[i][j] = self.Catch(votes_na_new[i][j])

        return votes_na_new
Example #24
File: reductions.py, Project: imclab/uvis
def delete_singleton_axis( mv, vid=None ):
    """If mv depends on an axis with just one value, create a copy of mv without that axis, and
    without the corresponding data dimension.  Normally this happens when time has been averaged
    out, but there is still a one-valued time axis left (thus one would normally use id='time').
    You can specify the axis id if there might be more than one singleton."""
    axes = allAxes(mv)
    saxis = None
    si = None
    for i in range(len(axes)):
        if len(axes[i])==1 and (vid==None or axes[i].id==vid):
            saxis = axes[i]
            si = i
            del axes[si]
            break
    if saxis==None: return mv
    data = ma.copy( mv.data )
    if numpy.version.version >= '1.7.0':
        data = ma.squeeze( data, axis=si )
    else:
        data = ma.squeeze( data )   # let's hope that there's only one singleton!
    mvnew = cdms2.createVariable ( data, axes=axes, id=mv.id )
    if hasattr(mv,'units'): mvnew.units = mv.units
    return mvnew
Example #25
def find_freezing_region(obs, melting_layer):
    """Finds freezing region using the model temperature and melting layer.

    For every profile that contains a melting layer, the subzero region starts
    from the mean melting layer height. If there are (long) time windows where
    no melting layer is present, model temperature is used in the
    middle of the time window. Finally, the subzero altitudes are linearly
    interpolated for all profiles.

    Args:
        obs (ClassData): The :class:`ClassData` instance.
        melting_layer (ndarray): 2-D boolean array denoting melting layer.

    Returns:
        ndarray: 2-D boolean array denoting the sub-zero region.

    Notes:
        It is not clear how model temperature and melting layer should be
        ideally combined to determine the sub-zero region.

    """
    is_freezing = np.zeros(obs.tw.shape, dtype=bool)
    t0_alt = _find_t0_alt(obs.tw, obs.height)
    mean_melting_alt = _find_mean_melting_alt(obs, melting_layer)
    freezing_alt = ma.copy(mean_melting_alt)
    for ind in (0, -1):
        freezing_alt[ind] = mean_melting_alt[ind] or t0_alt[ind]
    win = utils.n_elements(obs.time, 240, 'time')  # 4h window
    mid_win = int(win / 2)
    for n in range(len(obs.time) - win):
        if mean_melting_alt[n:n + win].mask.all():
            freezing_alt[n + mid_win] = t0_alt[n + mid_win]
    ind = ~freezing_alt.mask
    f = interp1d(obs.time[ind], freezing_alt[ind])
    for ii, alt in enumerate(f(obs.time)):
        is_freezing[ii, obs.height > alt] = True
    return is_freezing
Example #26
def _mark_gaps(time: np.ndarray,
               data: ma.MaskedArray,
               max_allowed_gap: float = 1) -> tuple:
    assert time[0] >= 0
    assert time[-1] <= 24
    max_gap = max_allowed_gap / 60
    if not ma.is_masked(data):
        mask_new = np.zeros(data.shape)
    elif ma.all(data.mask) is ma.masked:
        mask_new = np.ones(data.shape)
    else:
        mask_new = np.copy(data.mask)
    data_new = ma.copy(data)
    time_new = np.copy(time)
    gap_indices = np.where(np.diff(time) > max_gap)[0]
    temp_array = np.zeros((2, data.shape[1]))
    temp_mask = np.ones((2, data.shape[1]))
    time_delta = 0.001
    for ind in np.sort(gap_indices)[::-1]:
        ind += 1
        data_new = np.insert(data_new, ind, temp_array, axis=0)
        mask_new = np.insert(mask_new, ind, temp_mask, axis=0)
        time_new = np.insert(time_new, ind, time[ind] - time_delta)
        time_new = np.insert(time_new, ind, time[ind - 1] + time_delta)
    if (time[0] - 0) > max_gap:
        data_new = np.insert(data_new, 0, temp_array, axis=0)
        mask_new = np.insert(mask_new, 0, temp_mask, axis=0)
        time_new = np.insert(time_new, 0, time[0] - time_delta)
        time_new = np.insert(time_new, 0, time_delta)
    if (24 - time[-1]) > max_gap:
        ind = mask_new.shape[0]
        data_new = np.insert(data_new, ind, temp_array, axis=0)
        mask_new = np.insert(mask_new, ind, temp_mask, axis=0)
        time_new = np.insert(time_new, ind, 24 - time_delta)
        time_new = np.insert(time_new, ind, time[-1] + time_delta)
    data_new.mask = mask_new
    return time_new, data_new
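A hypothetical call of _mark_gaps with a made-up time axis (decimal hours of day); rows of masked data are inserted around gaps longer than max_allowed_gap minutes and at the start and end of the day:

import numpy as np
import numpy.ma as ma

time = np.array([6.0, 6.1, 9.0, 9.1])              # roughly a three-hour gap in the middle
data = ma.array(np.random.rand(4, 2), mask=False)
time_new, data_new = _mark_gaps(time, data, max_allowed_gap=30)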
Example #27
    def Rescale(self):
        """Forces a matrix of raw (user-supplied) information
        (for example, # of House Seats, or DJIA) to conform to
        SVD-appropriate range.

        Practically, this is done by subtracting min and dividing by
        scaled-range (which itself is max-min).

        """
        # Calculate multiplicative factors
        InvSpan = []
        for scale in self.decision_bounds:
            InvSpan.append(1 / float(scale["max"] - scale["min"]))

        # Recenter
        OutMatrix = ma.copy(self.votes)
        cols = self.votes.shape[1]
        for i in range(cols):
            OutMatrix[:,i] -= self.decision_bounds[i]["min"]

        # Rescale
        OutMatrix[np.isnan(OutMatrix)] = np.mean(OutMatrix)

        return np.dot(OutMatrix, np.diag(InvSpan))
Example #28
def plot_params(var_name='', nc=None, val=None, plotter=None) :
	"""Suggests a number of plot parameters for a ModelE ScaledACC output variable.
	Output to be used directly as kwargs for giss.plot.plot_var()

	Args:
		var_name (string):
			Name of the variable to plot (from the Scaled ACC file)
		nc (netCDF4.Dataset, OPTIONAL):
			Open netCDF Scaled ACC file.
			If set, then data and meta-data will be read from this file.
		val (np.array, OPTIONAL):
			Field to plot.  If not set, then will be read from the netCDF file.
		plotter (OPTIONAL):
			Plotter to use when plotting data of this shape.

	Returns: Dictionary with the following elements
		plotter (giss.plot.*Plotter):
			Abstracts away grid geometry from pcolormesh() call.
			Guess at the right plotter (since this IS ModelE data).
		var_name (string):
			Name of the variable to plot (same as var_name arg)
		val (np.ma.MaskedArray):
			The value to plot
		units (string, OPTIONAL):
			Units in which val is expressed
		title (string):
			Suggested title for the plot
		plot_args (dict):
			Suggested keyword arguments for pcolormesh() command.
			Override if you like: norm, cmap, vmin, vmax
		cb_args (dict):
			Suggested keyword arguments for colorbar command.
			Override if you like: ticks, format
		plot_boundaries (function(basemap)):
			Plot map boundaries, coastlines, parallels, meridians, etc.
	"""

	info = {'var_name' : var_name}

	# Read meta-data out of the netCDF file, if we can
	if nc is not None and var_name in nc.variables :
		info.update(nc.variables[var_name].__dict__)

	# Init kwargs for Plotter.pcolormesh() command
	plot_args = {}
	info['plot_args'] = plot_args

	# Init kwargs for colorbar command
	cb_args = {}
	info['cb_args'] = cb_args

	info['var_name'] = var_name

	# Get the data
	if val is None and nc is not None:
		info['val'] = giss.modele.read_ncvar(nc, var_name)
	else :
		info['val'] = ma.copy(val)

	# Guess a plotter
	if plotter is None :
		info['plotter'] = plotters.guess_plotter(info['val'])
	else :
		info['plotter'] = plotter

#	# Rescale if needed
#	if var_name in _change_units :
#		rs = _change_units[var_name]
#		info['units'] = rs[0]
#		info['val'] = rs[1](info['val'])	# Run the scaling function

	if var_name in _zero_centered :
		plot_args['norm'] = giss.plot.AsymmetricNormalize()
		reverse = (var_name in _reverse_scale)
		plot_args['cmap'] = giss.plot.cpt('giss-cpt/BlRe.cpt', reverse=reverse).cmap
		plot_args['vmin'] = np.nanmin(info['val'])
		plot_args['vmax'] = np.nanmax(info['val'])
		cb_args['ticks'] = [plot_args['vmin'], 0, plot_args['vmax']]
		cb_args['format'] = '%0.2f'

	# These could be decent defaults for anything with a colorbar
	cb_args['location'] = 'bottom'
	cb_args['size'] = '5%'
	cb_args['pad'] = '2%'

	# Suggest a title
	if 'units' in info :
		info['title'] = '%s (%s)' % (info['var_name'], info['units'])
	else :
		info['title'] = info['var_name']

	# Default coastlines
	info['plot_boundaries'] = _default_plot_boundaries

	return info
Example #29
    def fit_rm_cable_delay(self,
                           pInit,
                           IQUV,
                           maxfev=20000,
                           ftol=1e-3,
                           IQUVerr=None,
                           power2Q=0,
                           bounds=(-np.inf, np.inf),
                           method='trf',
                           noCableDelay=0,
                           smWidth=3.,
                           weights=None):
        '''fitting RM and cable delay:
        INPUT:
            initial parameter: pInit=np.array([RM,np.repeat(tau,numSubBand),np.repeat(psi,numSubBand),phi])
                               RM: rotation measure
                               tau: cable delay
                               psi: a constant phase between U and V for each sub band
                               phi: a constant phase between Q and U
                               the latter two are used to rotate all power to Q
            IQUV: 4 by len(freq) array

        OPTIONAL:
            weights: an array with the same length as the input frequency, default weights=1.
            power2Q: whether to rotate all the power in U to Q
            parameters for least_squares:
            maxfev,ftol: parameters for leastsq function, default: maxfev=20000,ftol=1e-3
            bounds:default:(-np.inf,np.inf)
            method: default:'trf'
        '''
        self.noCableDelay = noCableDelay
        if self._test_data_dimension(IQUV) != 0:
            return -1
        if IQUVerr is None:
            weightsI = None
            weightsQUV = None
        else:
            weight = 1. / IQUVerr
            weight = ma.masked_invalid(weight)
            weight.set_fill_value(0)
            weights = ma.copy(weight) / weight.std()
            weightsI, weightsQUV = weights[0]**2, weights[1:]

        I, QUV = ma.copy(IQUV[0]), ma.copy(IQUV[1:])
        Ismt = self.blackman_smooth(I, weightsI=weightsI, smWidth=smWidth)
        IsmtRnm = Ismt / Ismt.mean()

        if weights is not None:
            weightsQUV = np.repeat(weights[None, :], 3, axis=0)

        paramFit = least_squares(self._loss_function,
                                 pInit,
                                 args=(QUV, weightsQUV, power2Q, IsmtRnm),
                                 max_nfev=maxfev,
                                 ftol=ftol,
                                 bounds=bounds,
                                 method=method)
        para, jac = paramFit.x, paramFit.jac
        rottedQUV = self.rot_back_QUV(para,
                                      QUV,
                                      numSubBand=self.numSubBand,
                                      power2Q=power2Q)
        #return para,jac
        jac = jac[:, [0, 1, -1]]
        if power2Q == 1 and rottedQUV[1].mean() < 0:
            para[-1] = (para[-1] + np.pi) % (2 * np.pi)
        if noCableDelay == 1:
            #para=para[[0,-1]]
            jac = jac[:, [0, -1]]
        if noCableDelay == 2:
            #para=para[[1]]
            jac = jac[:, [1]]
        cov = np.linalg.inv(jac.T.dot(jac))
        paraErr = np.sqrt(np.diagonal(cov))
        print('fitting results para, err', para, paraErr)
        return para, paraErr
Example #30
def _remove_noise(beta_in: np.ndarray, noise: np.ndarray) -> np.ndarray:
    beta = ma.copy(beta_in)
    snr_limit = 5
    snr = (beta.T / noise)
    beta[snr.T < snr_limit] = ma.masked
    return beta
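A hypothetical example of the noise screening above with made-up backscatter and per-profile noise levels:

import numpy as np
import numpy.ma as ma

beta = ma.array(np.random.rand(4, 10) * 1e-6)   # profiles x range gates
noise = np.full(4, 1e-7)                        # one noise estimate per profile
screened = _remove_noise(beta, noise)           # gates with SNR below 5 come back masked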
Example #31
def _append_iwc(iwc_data, ice_class):
    iwc = ma.copy(iwc_data.data['iwc_inc_rain'][:])
    iwc[ice_class.ice_above_rain] = ma.masked
    iwc_data.append_data(iwc, 'iwc')
Example #32
File: collector.py, Project: titusjan/argos
    def getSlicedArray(self, copy=True):
        """ Slice the rti using a tuple of slices made from the values of the combo and spin boxes.

            :param copy: If True (the default), a copy is made so that inspectors cannot
                accidentally modify the underlying array of the RTIs. You can set copy=False as a
                potential optimization, but only if you are absolutely sure that you don't modify
                the slicedArray in your inspector! Note that this function calls transpose,
                which can still make a copy of the array for certain permutations.

            :return: Numpy masked array with the same number of dimension as the number of
                comboboxes (this can be zero!).

                Returns None if no slice can be made (i.e. the RTI is not sliceable).
        """
        #logger.debug("getSlicedArray() called")

        if not self.rtiIsSliceable:
            return None

        # The dimensions that are selected in the combo boxes will be set to slice(None),
        # the values from the spin boxes will be set as a single integer value
        nDims = self.rti.nDims
        sliceList = [slice(None)] * nDims

        for spinBox in self._spinBoxes:
            dimNr = spinBox.property("dim_nr")
            sliceList[dimNr] = spinBox.value()

        # Make the array slicer. It needs to be a tuple, a list of only integers will be
        # interpreted as an index. With a tuple, array[(exp1, exp2, ..., expN)] is equivalent to
        # array[exp1, exp2, ..., expN].
        # See: http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
        logger.debug("Array slice list: {}".format(str(sliceList)))
        slicedArray = self.rti[tuple(sliceList)]

        # Make a copy to prevent inspectors from modifying the underlying array.
        if copy:
            slicedArray = ma.copy(slicedArray)

        # If there are no comboboxes, the sliceList will contain no slice objects, only ints. Then
        # the resulting slicedArray will usually be a scalar (only structured fields may yield an
        # array). We convert this scalar to a zero-dimensional Numpy array so that inspectors
        # always get an array (having the same number of dimensions as the dimensionality of the
        # inspector, i.e. the number of comboboxes).
        if self.maxCombos == 0:
            slicedArray = ma.MaskedArray(slicedArray)

        # Post-condition type check
        check_is_an_array(slicedArray, np.ndarray)

        # Enforce the return type to be a masked array.
        if not isinstance(slicedArray, ma.MaskedArray):
            slicedArray = ma.MaskedArray(slicedArray)

        # Add fake dimensions of length 1 so that result.ndim will equal the number of combo boxes
        for dimNr in range(slicedArray.ndim, self.maxCombos):
            #logger.debug("Adding fake dimension: {}".format(dimNr))
            slicedArray = ma.expand_dims(slicedArray, dimNr)

        # Post-condition dimension check
        assert slicedArray.ndim == self.maxCombos, \
            "Bug: getSlicedArray should return a {:d}D array, got: {}D" \
            .format(self.maxCombos, slicedArray.ndim)

        # Convert to ArrayWithMask class for working around issues with the numpy maskedarray
        awm = ArrayWithMask.createFromMaskedArray(slicedArray)
        del slicedArray

        # Shuffle the dimensions to be in the order as specified by the combo boxes
        comboDims = [self._comboBoxDimensionIndex(cb) for cb in self._comboBoxes]
        permutations = np.argsort(comboDims)
        logger.debug("slicedArray.shape: {}".format(awm.data.shape))
        logger.debug("Transposing dimensions: {}".format(permutations))
        awm = awm.transpose(permutations)

        awm.checkIsConsistent()

        return awm
Example #33
    def readAll(self):
        """Attempt to read all MetricBundles from disk.

        You must set the metrics/slicer/constraint/runName for a metricBundle appropriately;
        then this method will search for files in the location self.outDir/metricBundle.fileRoot.
        Reads all the files associated with all metricbundles in self.bundleDict.
        """
        reduceBundleDict = {}
        removeBundles = []
        for b in self.bundleDict:
            bundle = self.bundleDict[b]
            filename = os.path.join(self.outDir, bundle.fileRoot + '.npz')
            try:
                # Create a temporary metricBundle to read the data into.
                #  (we don't use b directly, as this overrides plotDict/etc).
                tmpBundle = createEmptyMetricBundle()
                tmpBundle.read(filename)
                # Copy the tmpBundle metricValues into bundle.
                bundle.metricValues = tmpBundle.metricValues
                # And copy the slicer into b, to get slicePoints.
                bundle.slicer = tmpBundle.slicer
                if self.verbose:
                    print('Read %s from disk.' % (bundle.fileRoot))
            except IOError:
                warnings.warn(
                    'Warning: file %s not found, bundle not restored.' %
                    filename)
                removeBundles.append(b)

            # Look to see if this is a complex metric, with associated 'reduce' functions,
            # and read those in too.
            if len(bundle.metric.reduceFuncs) > 0:
                origMetricName = bundle.metric.name
                for reduceFunc in bundle.metric.reduceFuncs.values():
                    reduceName = origMetricName + '_' + reduceFunc.__name__.replace(
                        'reduce', '')
                    # Borrow the fileRoot in b (we'll reset it appropriately afterwards).
                    bundle.metric.name = reduceName
                    bundle._buildFileRoot()
                    filename = os.path.join(self.outDir,
                                            bundle.fileRoot + '.npz')
                    tmpBundle = createEmptyMetricBundle()
                    try:
                        tmpBundle.read(filename)
                        # This won't necessarily recreate the plotDict and displayDict exactly
                        # as they would have been made if you calculated the reduce metric from scratch.
                        # Perhaps update these metric reduce dictionaries after reading them in?
                        newmetricBundle = MetricBundle(
                            metric=bundle.metric,
                            slicer=bundle.slicer,
                            constraint=bundle.constraint,
                            stackerList=bundle.stackerList,
                            runName=bundle.runName,
                            metadata=bundle.metadata,
                            plotDict=bundle.plotDict,
                            displayDict=bundle.displayDict,
                            summaryMetrics=bundle.summaryMetrics,
                            mapsList=bundle.mapsList,
                            fileRoot=bundle.fileRoot,
                            plotFuncs=bundle.plotFuncs)
                        newmetricBundle.metric.name = reduceName
                        newmetricBundle.metricValues = ma.copy(
                            tmpBundle.metricValues)
                        # Add the new metricBundle to our metricBundleGroup dictionary.
                        name = newmetricBundle.metric.name
                        if name in self.bundleDict:
                            name = newmetricBundle.fileRoot
                        reduceBundleDict[name] = newmetricBundle
                        if self.verbose:
                            print('Read %s from disk.' %
                                  (newmetricBundle.fileRoot))
                    except IOError:
                        warnings.warn(
                            'Warning: file %s not found, bundle not restored ("reduce" metric).'
                            % filename)

                    # Remove summaryMetrics from top level metricbundle.
                    bundle.summaryMetrics = []
                    # Update parent MetricBundle name.
                    bundle.metric.name = origMetricName
                    bundle._buildFileRoot()

        # Add the reduce bundles into the bundleDict.
        self.bundleDict.update(reduceBundleDict)
        # And remove the bundles which were not found on disk, so we don't try to make (blank) plots.
        for b in removeBundles:
            del self.bundleDict[b]
Example #34
 def _calc_lwf(self, lwc_in):
     """Calculates drizzle liquid water flux."""
     flux = ma.copy(lwc_in)
     flux[self._ind_drizzle] *= (self._data.mie["lwf"][self._ind_lut] *
                                 self._data.mie["termv"][self._ind_lut[1]])
     return flux
Example #35
def find_liquid(
    obs: ClassData,
    peak_amp: float = 1e-6,
    max_width: float = 300,
    min_points: int = 3,
    min_top_der: float = 1e-7,
    min_lwp: float = 0,
    min_alt: float = 100,
) -> dict:
    """Estimate liquid layers from SNR-screened attenuated backscatter.

    Args:
        obs: The :class:`ClassData` instance.
        peak_amp: Minimum value of peak. Default is 1e-6.
        max_width: Maximum width of peak. Default is 300 (m).
        min_points: Minimum number of valid points in peak. Default is 3.
        min_top_der: Minimum derivative above peak, defined as
            (beta_peak-beta_top) / (alt_top-alt_peak). Default is 1e-7.
        min_lwp: Minimum value from linearly interpolated lwp measured by the mwr. Default is 0.
        min_alt: Minimum altitude of the peak from the ground. Default is 100 (m).

    Returns:
        Dict containing `presence`, `bases` and `tops`.

    References:
        The method is based on Tuononen, M. et.al, 2019,
        https://acp.copernicus.org/articles/19/1985/2019/.

    """
    def _is_proper_peak():
        conditions = (
            npoints >= min_points,
            peak_width < max_width,
            top_der > min_top_der,
            is_positive_lwp,
            peak_alt > min_alt,
        )
        return all(conditions)

    def _save_peak_position():
        is_liquid[n, base:top + 1] = True
        liquid_top[n, top] = True
        liquid_base[n, base] = True

    lwp_int = interpolate_lwp(obs)
    beta = ma.copy(obs.beta)
    height = obs.height

    is_liquid, liquid_top, liquid_base = utils.init(3,
                                                    beta.shape,
                                                    dtype=bool,
                                                    masked=False)
    base_below_peak = utils.n_elements(height, 200)
    top_above_peak = utils.n_elements(height, 150)
    difference = np.diff(beta, axis=1)
    assert isinstance(difference, ma.MaskedArray)
    beta_diff = difference.filled(0)
    beta = beta.filled(0)
    peak_indices = _find_strong_peaks(beta, peak_amp)

    for n, peak in zip(*peak_indices):
        lprof = beta[n, :]
        dprof = beta_diff[n, :]
        try:
            base = ind_base(dprof, peak, base_below_peak, 4)
            top = ind_top(dprof, peak, height.shape[0], top_above_peak, 4)
        except IndexError:
            continue
        npoints = np.count_nonzero(lprof[base:top + 1])
        peak_width = height[top] - height[base]
        peak_alt = height[peak] - height[0]
        top_der = (lprof[peak] - lprof[top]) / (height[top] - height[peak])
        is_positive_lwp = lwp_int[n] > min_lwp
        if _is_proper_peak():
            _save_peak_position()

    return {"presence": is_liquid, "bases": liquid_base, "tops": liquid_top}
Example #36
 def append_iwc(self, ice_classification: IceClassification) -> None:
     """Calculates ice water content"""
     iwc = ma.copy(self.data["iwc_inc_rain"][:])
     iwc[ice_classification.ice_above_rain] = ma.masked
     self.append_data(iwc, "iwc")
Example #37
def ss_recover():
    # Preliminary data file upload
    global h, gal_num, line_num, halo_num, r_limit, vlimit, beta
    h, gal_num, line_num, halo_num, r_limit, vlimit, beta = loadtxt(
        ""
        + root
        + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
        + str(run_loc)
        + "/program_constants.tab",
        unpack=True,
    )
    halo_num = int(halo_num)
    line_num, gal_num = int(line_num), int(gal_num)

    # Second preliminary data file upload
    global HaloID, M_crit200, R_crit200, SRAD, ESRAD, HVD
    HaloID, M_crit200, R_crit200, SRAD, ESRAD, HVD = loadtxt(
        "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/simdata.tab",
        unpack=True,
    )
    HaloID = str(HaloID)
    HaloID, M_crit200, R_crit200, SRAD, ESRAD, HVD = (
        HaloID[:halo_num],
        M_crit200[:halo_num],
        R_crit200[:halo_num],
        SRAD[:halo_num],
        ESRAD[:halo_num],
        HVD[:halo_num],
    )

    # First Data file upload
    global ENC_CAUMASS, ENC_INFMASS, ENC_VDISP
    j = 0
    for m in range(halo_num):
        if j == 0:  # Initialization of arrays
            ENC_CAUMASS, ENC_INFMASS, ENC_VDISP = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_constants.tab",
                usecols=(0, 1, 2),
                unpack=True,
            )
        else:
            ENC_CAUMASSt, ENC_INFMASSt, ENC_VDISPt = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_constants.tab",
                usecols=(0, 1, 2),
                unpack=True,
            )
            ENC_CAUMASS = hstack([ENC_CAUMASS, ENC_CAUMASSt])
            ENC_INFMASS = hstack([ENC_INFMASS, ENC_INFMASSt])
            ENC_VDISP = hstack([ENC_VDISP, ENC_VDISPt])
        j += 1

        # Second data file upload
    global LINE_CAUMASS, LINE_INFMASS, LINE_VDISP
    j = 0
    for m in range(halo_num):
        if j == 0:  # Initialization of arrays
            LINE_CAUMASS, LINE_INFMASS, LINE_VDISP = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_linenum.tab",
                unpack=True,
            )
        else:
            line_caumass, line_infmass, line_vdisp = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_linenum.tab",
                unpack=True,
            )
            LINE_CAUMASS = vstack([LINE_CAUMASS, line_caumass])
            LINE_INFMASS = vstack([LINE_INFMASS, line_infmass])
            LINE_VDISP = vstack([LINE_VDISP, line_vdisp])
        j += 1

        # Third data file upload
    global ENC_CAUSURF, ENC_INFSURF, ENC_INFNFW, x_range
    j = 0
    for m in range(halo_num):
        if j == 0:  # Initialization of arrays
            ENC_CAUSURF, ENC_INFSURF, ENC_INFNFW, x_range = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_profiles.tab",
                unpack=True,
            )
        else:
            enc_causurf, enc_infsurf, enc_infnfw, x_range = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_profiles.tab",
                unpack=True,
            )
            ENC_CAUSURF = vstack([ENC_CAUSURF, enc_causurf])
            ENC_INFSURF = vstack([ENC_INFSURF, enc_infsurf])
            ENC_INFNFW = vstack([ENC_INFNFW, enc_infnfw])
        j += 1

        # Fourth data file upload
    global ENC_R, ENC_V, ENC_MAG, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D
    ENC_R, ENC_V, ENC_MAG, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D = (
        [],
        [],
        [],
        [],
        [],
        [],
        [],
        [],
        [],
    )
    j = 0
    for m in range(halo_num):
        enc_r, enc_v, enc_mag, enc_gpx3d, enc_gpy3d, enc_gpz3d, enc_gvx3d, enc_gvy3d, enc_gvz3d = loadtxt(
            ""
            + root
            + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
            + str(run_loc)
            + "/halo_"
            + str(m)
            + "_RVdata.tab",
            unpack=True,
        )
        ENC_R.append(enc_r)
        ENC_V.append(enc_v)
        ENC_MAG.append(enc_mag)
        ENC_GPX3D.append(enc_gpx3d)
        ENC_GPY3D.append(enc_gpy3d)
        ENC_GPZ3D.append(enc_gpz3d)
        ENC_GVX3D.append(enc_gvx3d)
        ENC_GVY3D.append(enc_gvy3d)
        ENC_GVZ3D.append(enc_gvz3d)
        j += 1
    ENC_R, ENC_V, ENC_MAG, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D = (
        array(ENC_R),
        array(ENC_V),
        array(ENC_MAG),
        array(ENC_GPX3D),
        array(ENC_GPY3D),
        array(ENC_GPZ3D),
        array(ENC_GVX3D),
        array(ENC_GVY3D),
        array(ENC_GVZ3D),
    )
    # Fifth data file to upload
    global LINE_CAUSURF
    j = 0
    for m in range(halo_num):
        if j == 0:
            line_prof = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_losprofile.tab",
                unpack=True,
            )
            LINE_CAUSURF = array([line_prof[0:line_num]])
        else:
            line_prof = loadtxt(
                ""
                + root
                + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                + str(run_loc)
                + "/halo_"
                + str(m)
                + "_losprofile.tab",
                unpack=True,
            )
            line_causurf = array([line_prof[0:line_num]])
            LINE_CAUSURF = vstack([LINE_CAUSURF, line_causurf])
        j += 1

        # Sixth data set upload (los rv data)
    if get_los == True:
        global LINE_R, LINE_V, LINE_MAG
        LINE_R, LINE_V, LINE_MAG = [], [], []
        j = 0
        for m in range(halo_num):
            line_r, line_v, line_mag = [], [], []
            for l in range(line_num):
                r, v, mag = loadtxt(
                    ""
                    + root
                    + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/"
                    + str(run_loc)
                    + "/LOS_RV/halo_"
                    + str(m)
                    + "_los_"
                    + str(l)
                    + "_rv.tab",
                    unpack=True,
                )
                line_r.append(r)
                line_v.append(v)
                line_mag.append(mag)
            LINE_R.append(line_r)
            LINE_V.append(line_v)
            LINE_MAG.append(line_mag)
        LINE_R, LINE_V, LINE_MAG = array(LINE_R), array(LINE_V), array(LINE_MAG)

        # Other data arrays to use:
    global avg_mfrac, avg_hvdfrac, stack_mfrac, stack_hvdfrac, maLINE_CAUMASS, maLINE_VDISP
    global stack_mbias, stack_mscat, stack_vbias, stack_vscat, avg_mbias, avg_mscat, avg_vbias, avg_vscat

    maLINE_CAUMASS = ma.masked_array(LINE_CAUMASS, mask=LINE_CAUMASS == 0)  # Mask 0 Values
    maLINE_VDISP = ma.masked_array(LINE_VDISP, mask=LINE_VDISP == 0)  # Mask 0 Values

    ### Mass Fractions ###
    # Note: I was using map() as an iterator, but for N = 5, sometimes there are less than 3 non-masked values per los
    # Note: and biweight###() does not take less than 4 unique values. I don't yet know how to incorporate a "try:"
    # Note: statement into an iterator function like map(), so I resort to a "for" loop
    ## Ensemble fractions
    stack_mfrac = ma.log(ENC_CAUMASS / M_crit200)
    stack_hvdfrac = ma.log(ENC_VDISP / HVD)
    ## Averaged fractions
    a_size = halo_num  # This becomes line_num if doing vertical average first!!
    avg_mfrac, avg_hvdfrac = zeros(a_size), zeros(a_size)
    for a in range(a_size):
        try:
            avg_mfrac[a] = astStats.biweightLocation(ma.copy(ma.log(maLINE_CAUMASS[a] / M_crit200[a])), 6.0)
            avg_hvdfrac[a] = astStats.biweightLocation(ma.copy(ma.log(maLINE_VDISP[a] / HVD[a])), 6.0)
        except:
            avg_mfrac[a] = ma.mean(ma.log(maLINE_CAUMASS[a] / M_crit200[a]))
            avg_hvdfrac[a] = ma.mean(ma.log(maLINE_VDISP[a] / HVD[a]))
            # Bias and Scatter for Ensemble and LOS Average Systems
    stack_mbias, stack_mscat = (
        astStats.biweightLocation(ma.copy(stack_mfrac), 6.0),
        astStats.biweightScale(ma.copy(stack_mfrac), 9.0),
    )
    avg_mbias, avg_mscat = (
        astStats.biweightLocation(ma.copy(avg_mfrac), 6.0),
        astStats.biweightScale(ma.copy(avg_mfrac), 9.0),
    )
    stack_vbias, stack_vscat = (
        astStats.biweightLocation(ma.copy(stack_hvdfrac), 6.0),
        astStats.biweightScale(ma.copy(stack_hvdfrac), 9.0),
    )
    avg_vbias, avg_vscat = (
        astStats.biweightLocation(ma.copy(avg_hvdfrac), 6.0),
        astStats.biweightScale(ma.copy(avg_hvdfrac), 9.0),
    )
Example #38
    def readAll(self):
        """Attempt to read all MetricBundles from disk.

        You must set the metrics/slicer/constraint/runName for a metricBundle appropriately;
        then this method will search for files in the location self.outDir/metricBundle.fileRoot.
        Reads all the files associated with all metricbundles in self.bundleDict.
        """
        reduceBundleDict = {}
        removeBundles = []
        for b in self.bundleDict:
            bundle = self.bundleDict[b]
            filename = os.path.join(self.outDir, bundle.fileRoot + '.npz')
            try:
                # Create a temporary metricBundle to read the data into.
                #  (we don't use b directly, as this overrides plotDict/etc).
                tmpBundle = createEmptyMetricBundle()
                tmpBundle.read(filename)
                # Copy the tmpBundle metricValues into bundle.
                bundle.metricValues = tmpBundle.metricValues
                # And copy the slicer into b, to get slicePoints.
                bundle.slicer = tmpBundle.slicer
                if self.verbose:
                    print('Read %s from disk.' % (bundle.fileRoot))
            except IOError:
                warnings.warn('Warning: file %s not found, bundle not restored.' % filename)
                removeBundles.append(b)

            # Look to see if this is a complex metric, with associated 'reduce' functions,
            # and read those in too.
            if len(bundle.metric.reduceFuncs) > 0:
                origMetricName = bundle.metric.name
                for reduceFunc in bundle.metric.reduceFuncs.values():
                    reduceName = origMetricName + '_' + reduceFunc.__name__.replace('reduce', '')
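                    # e.g. a metric named 'Parallax' with reduce function 'reduceMedian' gives reduceName 'Parallax_Median'.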
                    # Borrow the fileRoot in b (we'll reset it appropriately afterwards).
                    bundle.metric.name = reduceName
                    bundle._buildFileRoot()
                    filename = os.path.join(self.outDir, bundle.fileRoot + '.npz')
                    tmpBundle = createEmptyMetricBundle()
                    try:
                        tmpBundle.read(filename)
                        # This won't necessarily recreate the plotDict and displayDict exactly
                        # as they would have been made if you calculated the reduce metric from scratch.
                        # Perhaps update these metric reduce dictionaries after reading them in?
                        newmetricBundle = MetricBundle(metric=bundle.metric, slicer=bundle.slicer,
                                                       constraint=bundle.constraint,
                                                       stackerList=bundle.stackerList, runName=bundle.runName,
                                                       metadata=bundle.metadata,
                                                       plotDict=bundle.plotDict,
                                                       displayDict=bundle.displayDict,
                                                       summaryMetrics=bundle.summaryMetrics,
                                                       mapsList=bundle.mapsList,
                                                       fileRoot=bundle.fileRoot, plotFuncs=bundle.plotFuncs)
                        newmetricBundle.metric.name = reduceName
                        newmetricBundle.metricValues = ma.copy(tmpBundle.metricValues)
                        # Add the new metricBundle to our metricBundleGroup dictionary.
                        name = newmetricBundle.metric.name
                        if name in self.bundleDict:
                            name = newmetricBundle.fileRoot
                        reduceBundleDict[name] = newmetricBundle
                        if self.verbose:
                            print('Read %s from disk.' % (newmetricBundle.fileRoot))
                    except IOError:
                        warnings.warn('Warning: file %s not found, bundle not restored ("reduce" metric).'
                                      % filename)

                    # Remove summaryMetrics from top level metricbundle.
                    bundle.summaryMetrics = []
                    # Update parent MetricBundle name.
                    bundle.metric.name = origMetricName
                    bundle._buildFileRoot()

        # Add the reduce bundles into the bundleDict.
        self.bundleDict.update(reduceBundleDict)
        # And remove the bundles which were not found on disk, so we don't try to make (blank) plots.
        for b in removeBundles:
            del self.bundleDict[b]
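
A condensed sketch (illustrative, not part of the class above) of the core read-back pattern the method uses: load a saved .npz file into a throwaway bundle and copy its metricValues and slicer onto the bundle you want to keep. createEmptyMetricBundle is assumed to be importable from the same module as in the method.

import os
import warnings
import numpy.ma as ma

def restore_bundle(bundle, out_dir):
    """Fill `bundle` from out_dir/<fileRoot>.npz; return True on success."""
    filename = os.path.join(out_dir, bundle.fileRoot + '.npz')
    tmp = createEmptyMetricBundle()
    try:
        tmp.read(filename)
    except IOError:
        warnings.warn('Warning: file %s not found, bundle not restored.' % filename)
        return False
    bundle.metricValues = ma.copy(tmp.metricValues)
    bundle.slicer = tmp.slicer  # carries the slicePoints
    return True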
예제 #39
0
File: collector.py Project: xyt556/argos
    def getSlicedArray(self, copy=True):
        """ Slice the rti using a tuple of slices made from the values of the combo and spin boxes.

            :param copy: If True (the default), a copy is made so that inspectors cannot
                accidentally modify the underlying data of the RTIs. You can set copy=False as a
                potential optimization, but only if you are absolutely sure that you don't modify
                the slicedArray in your inspector! Note that this function calls transpose,
                which can still make a copy of the array for certain permutations.

            :return: ArrayWithMask array with the same number of dimensions as the number of
                comboboxes (this can be zero!). A minimal standalone sketch of this slicing
                pattern follows after the method.

                Returns None if no slice can be made (i.e. the RTI is not sliceable).
            :rtype: ArrayWithMask
        """
        #logger.debug("getSlicedArray() called")

        if not self._rti:
            self.sigShowMessage.emit("No item selected.")
            return None

        if not self.rtiIsSliceable:
            # This is very common, so we don't show a message to avoid flooding the user.
            # Also this can have many different causes (compound data, lists, etc.) so it's
            # difficult to show a good, descriptive message.
            return None

        if np.prod(self._rti.arrayShape) == 0:
            self.sigShowMessage.emit("Selected item has zero array elements.")
            return None

        # The dimensions that are selected in the combo boxes will be set to slice(None);
        # the remaining dimensions are fixed to a single integer value taken from the spin boxes.
        nDims = self.rti.nDims
        sliceList = [slice(None)] * nDims

        for spinBox in self._spinBoxes:
            dimNr = spinBox.property("dim_nr")
            sliceList[dimNr] = spinBox.value()

        # Make the array slicer. It needs to be a tuple; a list of only integers would be
        # interpreted as an index. With a tuple, array[(exp1, exp2, ..., expN)] is equivalent to
        # array[exp1, exp2, ..., expN].
        # See: http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
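        # e.g. for a 3D RTI with combo boxes on dims 0 and 2 and the spin box for dim 1 at 5,
        # sliceList becomes [slice(None), 5, slice(None)] and the lookup below returns a 2D slice.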
        slicedArray = self.rti[tuple(sliceList)]

        # Make a copy to prevent inspectors from modifying the underlying array.
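        # np.copy(..., subok=True) keeps the MaskedArray subclass (and therefore the mask); the
        # subok argument only exists from numpy 1.19 onwards, hence the fallback to ma.copy below.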
        if copy:
            if versionStrToTuple(np.__version__) >= (1, 19, 0):
                slicedArray = np.copy(slicedArray,
                                      subok=True)  # Fixes issue #8
            else:
                slicedArray = ma.copy(slicedArray)

        # If there are no comboboxes the sliceList will contain no slice objects, only ints. Then
        # the resulting slicedArray will usually be a scalar (only structured fields may yield an
        # array). We convert this scalar to a zero-dimensional Numpy array so that inspectors
        # always get an array (having the same number of dimensions as the dimensionality of the
        # inspector, i.e. the number of comboboxes).
        # Also scalar RTIs, which have nDims == 0, can return a scalar which must be converted.
        # TODO: perhaps always convert to array.
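        # e.g. a scalar RTI holding np.float64(3.14) becomes a 0-dimensional MaskedArray here.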
        if self.maxCombos == 0 or self.rti.nDims == 0:
            slicedArray = ma.MaskedArray(slicedArray)

        # Post-condition type check
        check_is_an_array(slicedArray, np.ndarray)

        # Enforce the return type to be a masked array.
        if not isinstance(slicedArray, ma.MaskedArray):
            slicedArray = ma.MaskedArray(slicedArray)

        # Add fake dimensions of length 1 so that result.ndim will equal the number of combo boxes
        # TODO: Perhaps get rid of this because it fails with masked arrays with fill values.
        # The less we do here, the less chance an error occurs. See development/todo.txt
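        # e.g. a 1D slice with maxCombos == 2 is padded to shape (n, 1) by the loop below.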
        for dimNr in range(slicedArray.ndim, self.maxCombos):
            #logger.debug("Adding fake dimension: {}".format(dimNr))
            slicedArray = ma.expand_dims(slicedArray, dimNr)

        # Post-condition dimension check
        assert slicedArray.ndim == self.maxCombos, \
            "Bug: getSlicedArray should return a {:d}D array, got: {}D" \
            .format(self.maxCombos, slicedArray.ndim)

        # Convert to ArrayWithMask class for working around issues with the numpy maskedarray
        awm = ArrayWithMask.createFromMaskedArray(slicedArray)
        del slicedArray

        # Shuffle the dimensions to be in the order as specified by the combo boxes
        comboDims = [
            self._comboBoxDimensionIndex(cb) for cb in self._comboBoxes
        ]
        permutations = np.argsort(comboDims)
        logger.debug("slicedArray.shape: {}".format(awm.data.shape))
        logger.debug("Transposing dimensions: {}".format(permutations))
        awm = awm.transpose(permutations)

        awm.checkIsConsistent()

        return awm
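
A minimal, self-contained sketch of the slicing pattern getSlicedArray implements, with the argos-specific bookkeeping (combo boxes, ArrayWithMask, axis shuffling) left out. The function name slice_to_view and its arguments are hypothetical.

import numpy.ma as ma

def slice_to_view(arr, keep_dims, point):
    """Keep the axes listed in keep_dims whole; fix every other axis at the index given in point.

    arr: masked (or plain) ndarray, keep_dims: axis numbers to keep,
    point: mapping axis number -> integer index for the collapsed axes.
    """
    slice_list = [slice(None) if d in keep_dims else point[d] for d in range(arr.ndim)]
    # Indexing needs a tuple: a list of only integers would be interpreted as fancy indexing.
    sliced = arr[tuple(slice_list)]
    # Force a MaskedArray so scalars and plain ndarrays behave uniformly downstream.
    if not isinstance(sliced, ma.MaskedArray):
        sliced = ma.MaskedArray(sliced)
    # Pad with length-1 axes so the result always has len(keep_dims) dimensions.
    for dim_nr in range(sliced.ndim, len(keep_dims)):
        sliced = ma.expand_dims(sliced, dim_nr)
    return sliced

For example, slice_to_view(cube, keep_dims=[0, 2], point={1: 5}) returns the masked 2D slice cube[:, 5, :].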