Example #1
import warnings
import numpy as np


def calc_stc(cum, gpu=False):
    """
    Calculate STC (spatio-temporal consistency; Hanssen et al., 2008,
    Terrafirma) of a time series of displacement.
    Note that isolated pixels (with no valid surrounding pixel) get nan as STC.

    Input:
      cum  : Cumulative displacement (n_im, length, width)
      gpu  : GPU flag

    Return:
      stc  : STC (length, width)
    """
    if gpu:
        import cupy as xp
        cum = xp.asarray(cum)
    else:
        xp = np

    n_im, length, width = cum.shape

    ### Add 1 pixel margin to cum data filled with nan
    cum1 = xp.ones((n_im, length + 2, width + 2), dtype=xp.float32) * xp.nan
    cum1[:, 1:length + 1, 1:width + 1] = cum

    ### Calc STC for surrounding 8 pixels
    _stc = xp.ones((length, width, 8), dtype=xp.float32) * xp.nan
    pixels = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1], [2, 2]]
    ## Top left = [0, 0], bottom right = [2, 2], center = [1, 1]

    for i, pixel in enumerate(pixels):
        ### Spatial difference (surrounding pixel - center)
        d_cum = cum1[:, pixel[0]:length + pixel[0], pixel[1]:width +
                     pixel[1]] - cum1[:, 1:length + 1, 1:width + 1]

        ### Temporal difference (double difference)
        dd_cum = d_cum[:-1, :, :] - d_cum[1:, :, :]

        ### STC (i.e., RMS of DD)
        sumsq_dd_cum = xp.nansum(dd_cum**2, axis=0)
        n_dd_cum = (xp.sum(~xp.isnan(dd_cum),
                           axis=0)).astype(xp.float32)  # number of non-nan values
        n_dd_cum[n_dd_cum == 0] = xp.nan  # avoid division by zero
        _stc[:, :, i] = xp.sqrt(sumsq_dd_cum / n_dd_cum)

    ### Oddly, some adjacent pixels can have identical time series,
    ### resulting in an STC of 0. To avoid this, replace 0 with nan.
    _stc[_stc == 0] = xp.nan

    ### Identify minimum value as final STC
    with warnings.catch_warnings():  ## silence all-NaN slice warnings
        warnings.simplefilter('ignore', RuntimeWarning)
        stc = xp.nanmin(_stc, axis=2)

    if gpu:
        stc = xp.asnumpy(stc)
        del cum, cum1, _stc, d_cum, dd_cum, sumsq_dd_cum, n_dd_cum

    return stc
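A minimal usage sketch of the CPU path (the array sizes and values below are made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
# Hypothetical example: 5 acquisitions over a 4 x 6 pixel grid.
cum = np.cumsum(rng.normal(size=(5, 4, 6)).astype(np.float32), axis=0)
cum[:, 0, 0] = np.nan  # simulate a masked pixel

stc = calc_stc(cum)  # gpu=False uses plain NumPy
print(stc.shape)  # (4, 6); the masked pixel's STC is nan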
Example #2
def _masked_column_mean(arr, masked_value):
    """Compute the mean of each column in the 2D array arr, ignoring any
    instances of masked_value"""
    mask = _get_mask(arr, masked_value)
    count_missing_values = mask.sum(axis=0)
    n_elems = arr.shape[0] - count_missing_values
    # nansum drops NaN entries; for a finite sentinel, the masked entries
    # are included in the sum, so subtract their total contribution.
    mean = cp.nansum(arr, axis=0)
    if not cp.isnan(masked_value):
        mean -= (count_missing_values * masked_value)
    mean /= n_elems
    return mean
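In the source module, cp is CuPy and _get_mask is a small masking helper; the stand-in below is hypothetical and only mirrors the behavior this function needs. A quick sketch, assuming a CuPy environment:

import cupy as cp
import numpy as np

def _get_mask(arr, masked_value):
    # Hypothetical stand-in for the module's helper.
    if np.isnan(masked_value):
        return cp.isnan(arr)
    return arr == masked_value

arr = cp.asarray([[1.0, np.nan],
                  [3.0, 4.0]])
print(_masked_column_mean(arr, np.nan))  # [2. 4.]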
Example #3
    def test_nansum_axis_float16(self):
        # The values used by the test above overflow in float16, so skip
        # (return early) when the parametrized array is too large.
        if numpy.prod(self.shape) > 24:
            return True
        a = testing.shaped_arange(self.shape, dtype='e')
        a[:, 1] = cupy.nan
        sa = cupy.nansum(a, axis=1)
        b = testing.shaped_arange(self.shape, numpy, dtype='f')
        b[:, 1] = numpy.nan
        sb = numpy.nansum(b, axis=1)
        testing.assert_allclose(sa, sb.astype('e'))
Example #4
    def _dense_fit(self, X, strategy, missing_values, fill_value):
        """Fit the transformer on dense data."""
        mask = _get_mask(X, missing_values)

        # Mean
        if strategy == "mean":
            count_missing_values = mask.sum(axis=0)
            n_elems = X.shape[0] - count_missing_values
            mean = np.nansum(X, axis=0)
            mean -= (count_missing_values * missing_values)
            mean /= n_elems
            return mean

        # Median
        elif strategy == "median":
            count_missing_values = mask.sum(axis=0)
            n_elems = X.shape[0] - count_missing_values
            middle, is_odd = np.divmod(n_elems, 2)
            is_odd = is_odd.astype(np.bool)
            middle += count_missing_values
            X_sorted = X.copy()
            X_sorted[mask] = np.nan
            X_sorted = np.sort(X, axis=0)
            median = np.empty(X.shape[1], dtype=X.dtype)
            wis_odd = np.argwhere(is_odd).squeeze()
            wnot_odd = np.argwhere(~is_odd).squeeze()
            median[wis_odd] = X_sorted[middle[wis_odd], wis_odd]
            elm1 = X_sorted[middle[wnot_odd] - 1, wnot_odd]
            elm2 = X_sorted[middle[wnot_odd], wnot_odd]
            median[wnot_odd] = (elm1 + elm2) / 2.
            return median

        # Most frequent
        elif strategy == "most_frequent":
            n_features = X.shape[1]
            most_frequent = cpu_np.empty(n_features, dtype=X.dtype)
            for i in range(n_features):
                feature_mask_idxs = np.where(~mask[:, i])[0]
                values, counts = np.unique(X[feature_mask_idxs, i],
                                           return_counts=True)
                count_max = counts.max()
                if count_max > 0:
                    value = values[counts == count_max].min()
                else:
                    value = np.nan
                most_frequent[i] = value
            return np.array(most_frequent)

        # Constant
        elif strategy == "constant":
            return np.full(X.shape[1], fill_value, dtype=X.dtype)
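The median branch leans on the fact that NaN sorts to the end of each column; a standalone NumPy illustration of that trick (toy data, not the method itself):

import numpy as np

X = np.array([[1.0, 9.0],
              [np.nan, 7.0],
              [3.0, 5.0],
              [2.0, np.nan]])
X_sorted = np.sort(X, axis=0)  # NaN moves to the last rows
n_elems = (~np.isnan(X)).sum(axis=0)  # valid entries per column: [3 3]
middle = n_elems // 2
print(X_sorted[middle, np.arange(X.shape[1])])  # [2. 7.] (odd counts)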
Example #5
def _csc_mean_variance_axis0(X):
    """Compute mean, variance and nans count on the axis 0 of a CSC matrix

    Parameters
    ----------
    X : sparse CSC matrix
        Input array

    Returns
    -------
    means, variances, counts_nan : each an ndarray of shape (n_features,)
    """
    n_samples, n_features = X.shape

    means = cp.empty(n_features)
    variances = cp.empty(n_features)
    counts_nan = cp.empty(n_features)

    start = X.indptr[0]
    for i, end in enumerate(X.indptr[1:]):
        col = X.data[start:end]

        _count_zeros = n_samples - col.size  # implicit (unstored) zero entries
        _count_nans = (col != col).sum()  # NaN != NaN, so this counts NaNs

        _mean = cp.nansum(col) / (n_samples - _count_nans)
        _variance = cp.nansum((col - _mean)**2)
        # Each implicit zero contributes (0 - _mean)**2 = _mean**2.
        _variance += _count_zeros * (_mean**2)
        _variance /= (n_samples - _count_nans)

        means[i] = _mean
        variances[i] = _variance
        counts_nan[i] = _count_nans

        start = end
    return means, variances, counts_nan
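A quick smoke test, assuming CuPy is installed (cupyx.scipy.sparse supplies the CSC container; the values are illustrative):

import cupy as cp
from cupyx.scipy import sparse

dense = cp.asarray([[0.0, 1.0],
                    [2.0, cp.nan],
                    [0.0, 3.0]])
X = sparse.csc_matrix(dense)  # NaN is nonzero, so it is stored explicitly
means, variances, counts_nan = _csc_mean_variance_axis0(X)
print(means)  # [0.6667 2.] -- zeros count toward the mean, NaNs are dropped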
Example #6
    def test_nansum_out_wrong_shape(self):
        a = testing.shaped_arange(self.shape)
        a[:, 1] = cupy.nan
        b = cupy.empty((2, 3))  # deliberately mismatched output shape
        with self.assertRaises(ValueError):
            cupy.nansum(a, axis=1, out=b)
Example #7
    def _acausal_classifier_gpu(filter_posterior, movement_state_transition,
                                discrete_state_transition, observed_position_bin,
                                uniform):
        '''
        Parameters
        ----------
        filter_posterior : ndarray, shape (n_time, 2, n_position_bins)
        movement_state_transition : ndarray, shape (n_position_bins,
                                                    n_position_bins)
        discrete_state_transition : ndarray, shape (n_time, 2)
            discrete_state_transition[k, 0] = Pr(I_{k} = 1 | I_{k-1} = 0, v_{k})
            discrete_state_transition[k, 1] = Pr(I_{k} = 1 | I_{k-1} = 1, v_{k})
        observed_position_bin : ndarray, shape (n_time,)
            Which position bin the animal is in.
        uniform : ndarray, shape (n_position_bins,)
            Uniform distribution over position bins.

        Returns
        -------
        smoother_posterior : ndarray, shape (n_time, 2, n_position_bins)
            p(x_{k}, I_{k} \vert H_{1:T})
        smoother_probability : ndarray, shape (n_time, 2)
            smoother_probability[:, 0] = Pr(I_{k} = 0 \vert H_{1:T})
            smoother_probability[:, 1] = Pr(I_{k} = 1 \vert H_{1:T})

        Notes
        -----
        Each backward step forms two intermediate quantities:
        smoother_prior : ndarray, shape (2, n_position_bins)
            p(x_{k + 1}, I_{k + 1} \vert H_{1:k})
        weights : ndarray, shape (2, n_position_bins)
            \sum_{I_{k+1}} \int \Big[ \frac{p(x_{k+1} \mid x_{k}, I_{k}, I_{k+1}) *
            Pr(I_{k + 1} \mid I_{k}, v_{k}) * p(x_{k+1}, I_{k+1} \mid H_{1:T})}
            {p(x_{k + 1}, I_{k + 1} \mid H_{1:k})} \Big] dx_{k+1}
        '''  # noqa

        filter_posterior = cp.asarray(filter_posterior, dtype=cp.float32)
        movement_state_transition = cp.asarray(
            movement_state_transition, dtype=cp.float32)
        discrete_state_transition = cp.asarray(
            discrete_state_transition, dtype=cp.float32)
        observed_position_bin = cp.asarray(observed_position_bin)
        uniform = cp.asarray(uniform, dtype=cp.float32)
        EPS = cp.asarray(np.spacing(1), dtype=cp.float32)

        filter_probability = cp.sum(filter_posterior, axis=2)

        smoother_posterior = cp.zeros_like(filter_posterior)
        n_time, _, n_position_bins = filter_posterior.shape

        smoother_posterior[-1] = filter_posterior[-1].copy()

        # plain range: iterating a CuPy array would pull scalars from the
        # device on every step
        for k in range(n_time - 2, -1, -1):
            smoother_prior = cp.zeros((2, n_position_bins), dtype=cp.float32)
            weights = cp.zeros((2, n_position_bins), dtype=cp.float32)

            position_ind = observed_position_bin[k + 1]

            # Predict p(x_{k + 1}, I_{k + 1} \vert H_{1:k})
            # I_{k} = 0, I_{k + 1} = 0
            smoother_prior[0, position_ind] = (
                (1 - discrete_state_transition[k + 1, 0]) * filter_probability[k, 0])

            # I_{k} = 1, I_{k + 1} = 0
            smoother_prior[0, position_ind] += (
                (1 - discrete_state_transition[k + 1, 1]) * filter_probability[k, 1])

            # I_{k} = 0, I_{k + 1} = 1
            smoother_prior[1] = (
                discrete_state_transition[k + 1, 0] * uniform *
                filter_probability[k, 0])

            # I_{k} = 1, I_{k + 1} = 1
            smoother_prior[1] += (
                discrete_state_transition[k + 1, 1] *
                (movement_state_transition.T @ filter_posterior[k, 1]))

            # Update to p(x_{k}, I_{k} \vert H_{1:T})
            ratio = cp.exp(
                cp.log(smoother_posterior[k + 1]) -
                cp.log(smoother_prior + EPS))
            integrated_ratio = cp.sum(ratio, axis=1)
            # I_{k} = 0, I_{k + 1} = 0
            weights[0] = (
                (1 - discrete_state_transition[k + 1, 0]) * ratio[0, position_ind])

            # I_{k} = 0, I_{k + 1} = 1
            weights[0] += (
                uniform * discrete_state_transition[k + 1, 0] * integrated_ratio[1])

            # I_{k} = 1, I_{k + 1} = 0
            weights[1] = (
                (1 - discrete_state_transition[k + 1, 1]) * ratio[0, position_ind])

            # I_{k} = 1, I_{k + 1} = 1
            weights[1] += (
                discrete_state_transition[k + 1, 1] *
                ratio[1] @ movement_state_transition)

            smoother_posterior[k] = weights * filter_posterior[k]
            smoother_posterior[k] /= cp.nansum(smoother_posterior[k])

        smoother_probability = cp.sum(smoother_posterior, axis=2)

        return (cp.asnumpy(smoother_posterior),
                cp.asnumpy(smoother_probability))
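A shape-level smoke test with made-up, normalized inputs, assuming the function is reachable at module scope (requires CuPy; every value here is illustrative):

import numpy as np

n_time, n_bins = 4, 6
rng = np.random.default_rng(1)

filter_posterior = rng.random((n_time, 2, n_bins)).astype(np.float32)
filter_posterior /= filter_posterior.sum(axis=(1, 2), keepdims=True)
movement = rng.random((n_bins, n_bins)).astype(np.float32)
movement /= movement.sum(axis=1, keepdims=True)  # row-stochastic
discrete = np.full((n_time, 2), 0.1, dtype=np.float32)
observed_bin = rng.integers(0, n_bins, size=n_time)
uniform = np.full(n_bins, 1.0 / n_bins, dtype=np.float32)

post, prob = _acausal_classifier_gpu(
    filter_posterior, movement, discrete, observed_bin, uniform)
print(post.shape, prob.shape)  # (4, 2, 6) (4, 2)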
Example #8
    def _causal_classifier_gpu(likelihood, movement_state_transition, discrete_state_transition,
                               observed_position_bin, uniform):
        '''
        Parameters
        ----------
        likelihood : ndarray, shape (n_time, ...)
        movement_state_transition : ndarray, shape (n_position_bins,
                                                    n_position_bins)
        discrete_state_transition : ndarray, shape (n_time, 2)
            discrete_state_transition[k, 0] = Pr(I_{k} = 1 | I_{k-1} = 0, v_{k})
            discrete_state_transition[k, 1] = Pr(I_{k} = 1 | I_{k-1} = 1, v_{k})
        observed_position_bin : ndarray, shape (n_time,)
            Which position bin the animal is in.
        uniform : ndarray, shape (n_position_bins,)
            Uniform distribution over position bins.

        Returns
        -------
        posterior : ndarray, shape (n_time, 2, n_position_bins)
        state_probability : ndarray, shape (n_time, 2)
            state_probability[:, 0] = Pr(I_{k} = 0 \vert H_{1:k})
            state_probability[:, 1] = Pr(I_{k} = 1 \vert H_{1:k})
        data_log_likelihood : float
            Log of the normalizing constants accumulated over time.

        '''

        likelihood = cp.asarray(likelihood, dtype=cp.float32)
        movement_state_transition = cp.asarray(
            movement_state_transition, dtype=cp.float32)
        discrete_state_transition = cp.asarray(
            discrete_state_transition, dtype=cp.float32)
        observed_position_bin = cp.asarray(observed_position_bin)
        uniform = cp.asarray(uniform, dtype=cp.float32)

        n_position_bins = movement_state_transition.shape[0]
        n_time = likelihood.shape[0]
        n_states = 2

        posterior = cp.zeros(
            (n_time, n_states, n_position_bins), dtype=cp.float32)
        state_probability = cp.zeros((n_time, n_states), dtype=cp.float32)

        # Initial Conditions
        posterior[0, 0, observed_position_bin[0]] = likelihood[0, 0, 0]
        norm = cp.nansum(posterior[0])
        data_log_likelihood = cp.log(norm)
        posterior[0] /= norm
        state_probability[0] = cp.sum(posterior[0], axis=1)

        for k in range(1, n_time):
            prior = cp.zeros((n_states, n_position_bins), dtype=cp.float32)
            position_ind = observed_position_bin[k]
            # I_{k - 1} = 0, I_{k} = 0
            prior[0, position_ind] = (
                (1 - discrete_state_transition[k, 0]) * state_probability[k - 1, 0])
            # I_{k - 1} = 1, I_{k} = 0
            prior[0, position_ind] += (
                (1 - discrete_state_transition[k, 1]) * state_probability[k - 1, 1])

            # I_{k - 1} = 0, I_{k} = 1
            prior[1] = (discrete_state_transition[k, 0] * uniform *
                        state_probability[k - 1, 0])
            # I_{k - 1} = 1, I_{k} = 1
            prior[1] += (
                discrete_state_transition[k, 1] *
                (movement_state_transition.T @ posterior[k - 1, 1]))

            posterior[k] = prior * likelihood[k]
            norm = cp.nansum(posterior[k])
            data_log_likelihood += cp.log(norm)
            posterior[k] /= norm

            state_probability[k] = cp.sum(posterior[k], axis=1)

        return (cp.asnumpy(posterior),
                cp.asnumpy(state_probability),
                float(data_log_likelihood))
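And a matching smoke test for the causal pass (same toy setup as above, illustrative values only):

import numpy as np

n_time, n_bins = 4, 6
rng = np.random.default_rng(2)

likelihood = rng.random((n_time, 2, n_bins)).astype(np.float32)
movement = rng.random((n_bins, n_bins)).astype(np.float32)
movement /= movement.sum(axis=1, keepdims=True)  # row-stochastic
discrete = np.full((n_time, 2), 0.1, dtype=np.float32)
observed_bin = rng.integers(0, n_bins, size=n_time)
uniform = np.full(n_bins, 1.0 / n_bins, dtype=np.float32)

posterior, state_prob, log_lik = _causal_classifier_gpu(
    likelihood, movement, discrete, observed_bin, uniform)
print(posterior.shape, state_prob.shape, log_lik)  # (4, 2, 6) (4, 2) <float>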
Example #9
def _cov_pairwise(x1, x2, factor):
    # NaN-aware covariance numerator: sum elementwise products along the
    # observation axis and scale by 1 / factor (e.g. the ddof-adjusted count).
    return cupy.nansum(x1 * x2, axis=1, keepdims=True) * cupy.true_divide(
        1, factor)
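A tiny check, assuming rows are already demeaned and factor is the ddof-adjusted count of valid observations (here 3 valid values with ddof=1, so factor=2):

import cupy

x = cupy.asarray([[1.0, 2.0, cupy.nan, 4.0]])
x = x - cupy.nanmean(x, axis=1, keepdims=True)  # demean, ignoring NaN
print(_cov_pairwise(x, x, factor=2))  # [[2.3333]] -- NaN-ignoring variance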