Example 1
def mix_normals(means,
                sigmas,
                weights=None,
                normalize=False,
                axis=None,
                keepdims=False):
    """
    Return the mixture distribution of a sum of random variables.

    See https://en.wikipedia.org/wiki/Mixture_distribution#Moments.

    Arguments:
        means (numpy.ndarray): Variable means
        sigmas (numpy.ndarray): Variable standard deviations
        weights (numpy.ndarray): Variable weights
        normalize (bool): Whether to normalize weights so that they sum to 1
            for non-missing values
    """
    isnan_mean, isnan_sigmas, weights = prepare_normals(
        means, sigmas, weights, normalize, axis)
    wmeans = np.nansum(weights * means, axis=axis, keepdims=True)
    variances = np.nansum(
        weights * (means**2 + sigmas**2), axis=axis, keepdims=True) - wmeans**2
    # np.nansum interprets sum of nans as 0
    isnan = isnan_mean.all(axis=axis, keepdims=True)
    wmeans[isnan] = np.nan
    variances[isnan] = np.nan
    return (numpy_dropdims(wmeans, axis=axis, keepdims=keepdims),
            numpy_dropdims(np.sqrt(variances), axis=axis, keepdims=keepdims))
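A quick standalone check of the mixture-moment formulas used above (a sketch with made-up inputs; prepare_normals and numpy_dropdims are codebase helpers not needed for the formulas themselves):

import numpy as np

means = np.array([0.0, 2.0])
sigmas = np.array([1.0, 1.0])
weights = np.array([0.5, 0.5])
# Mixture mean: E[X] = sum(w * mu)
mean = np.sum(weights * means)
# Mixture variance: E[X^2] - E[X]^2 = sum(w * (mu^2 + sigma^2)) - mean^2
variance = np.sum(weights * (means**2 + sigmas**2)) - mean**2
print(mean, np.sqrt(variance))  # 1.0 1.414...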
Example 2
def select_repeat_tracks(runs):
    """
    Return Tracks composed of the best track for each initial point.

    Selects the track for each point that minimizes the temporal mean standard
    deviation for vx + vy, i.e. mean(sqrt(vx_sigma**2 + vy_sigma**2)).

    Arguments:
        runs (list): Tracks objects with identical point and time dimensions
    """
    # Compute metric for each track
    metric = np.vstack([
        np.nanmean(np.sqrt(np.nansum(run.sigmas[..., 3:5]**2, axis=2)), axis=1)
        for run in runs
    ])
    # Choose run with the smallest metric
    selected = np.argmin(metric, axis=0)
    # Merge runs
    means = runs[0].means.copy()
    sigmas = runs[0].sigmas.copy()
    for i, run in enumerate(runs[1:], start=1):
        mask = selected == i
        means[mask, ...] = run.means[mask, ...]
        sigmas[mask, ...] = run.sigmas[mask, ...]
    return glimpse.Tracks(datetimes=runs[0].datetimes,
                          means=means,
                          sigmas=sigmas)
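The select-and-merge pattern above, reduced to plain NumPy (a sketch with made-up values, independent of the glimpse.Tracks API):

import numpy as np

# Two candidate estimates per point and the metric used to choose between them
values = np.vstack([[10.0, 11.0, 12.0],
                    [20.0, 21.0, 22.0]])  # shape (nruns, npoints)
metric = np.vstack([[1.0, 5.0, 2.0],
                    [3.0, 1.0, 4.0]])
selected = np.argmin(metric, axis=0)      # best run index per point
merged = values[0].copy()
mask = selected == 1
merged[mask] = values[1, mask]
print(merged)  # [10. 21. 12.]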
Example 3
def flatten_tracks_doug(runs):
    """
    Return flattened means and sigmas from the forward and backward runs.

    Joins the second forward ('fv') and backward ('rv') runs, then collapses
    them with an inverse-variance weighted mean.
    """
    # Join together second forward and backward runs
    f, r = runs['fv'], runs['rv']
    means = np.column_stack((f.means[..., 3:], r.means[..., 3:]))
    sigmas = np.column_stack((f.sigmas[..., 3:], r.sigmas[..., 3:]))
    # Flatten joined runs
    # Mean: Inverse-variance weighted mean
    # Sigma: Linear combination of weighted correlated random variables
    # (approximation using the weighted mean of the variances)
    weights = sigmas**-2
    weights *= 1 / np.nansum(weights, axis=1, keepdims=True)
    allnan = np.isnan(means).all(axis=1, keepdims=True)
    means = np.nansum(weights * means, axis=1, keepdims=True)
    sigmas = np.sqrt(np.nansum(weights * sigmas**2, axis=1, keepdims=True))
    # np.nansum interprets sum of nans as 0
    means[allnan] = np.nan
    sigmas[allnan] = np.nan
    return means.squeeze(axis=1), sigmas.squeeze(axis=1)
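The inverse-variance weighting used here can be checked on a single pair of estimates (a sketch; the sigma uses the same weighted-mean-of-variances approximation as the code above):

import numpy as np

means = np.array([10.0, 12.0])
sigmas = np.array([1.0, 2.0])
weights = sigmas**-2
weights /= np.sum(weights)                    # [0.8, 0.2]
mean = np.sum(weights * means)                # 10.4
sigma = np.sqrt(np.sum(weights * sigmas**2))  # sqrt(1.6) ~ 1.26
print(mean, sigma)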
Example 4
def sum_normals(means,
                sigmas,
                weights=None,
                normalize=False,
                correlation=0,
                axis=None,
                keepdims=False):
    """
    Return the mean and sigma of the sum of random variables.

    See https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Linear_combinations.

    Arguments:
        means (numpy.ndarray): Variable means
        sigmas (numpy.ndarray): Variable standard deviations
        weights (numpy.ndarray): Variable weights
        normalize (bool): Whether to normalize weights so that they sum to 1
            for non-missing values
        correlation (float): Correlation to assume between pairs of different
            variables
        axis (int): Axis along which to sum the variables
        keepdims (bool): Whether to retain the reduced axis as a dimension
            of size one
    """
    isnan_mean, isnan_sigmas, weights = prepare_normals(
        means, sigmas, weights, normalize, axis)
    wmeans = np.nansum(weights * means, axis=axis, keepdims=True)
    # Initialize variance as sum of diagonal elements
    variances = np.nansum(weights**2 * sigmas**2, axis=axis, keepdims=True)
    # np.nansum interprets sum of nans as 0
    allnan = isnan_mean.all(axis=axis, keepdims=True)
    wmeans[allnan] = np.nan
    variances[allnan] = np.nan
    if correlation:
        # Add off-diagonal elements
        n = means.size if axis is None else means.shape[axis]
        pairs = np.triu_indices(n=n, k=1)
        variances += 2 * np.nansum(
            correlation * np.take(weights, pairs[0], axis=axis) *
            np.take(weights, pairs[1], axis=axis) *
            np.take(sigmas, pairs[0], axis=axis) *
            np.take(sigmas, pairs[1], axis=axis),
            axis=axis,
            keepdims=True)
    return (numpy_dropdims(wmeans, axis=axis, keepdims=keepdims),
            numpy_dropdims(np.sqrt(variances), axis=axis, keepdims=keepdims))
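The off-diagonal term implements the standard propagation formula Var = sum(w_i**2 * s_i**2) + 2 * rho * sum over i < j of (w_i * w_j * s_i * s_j). For rho = 1 the sigma of an unweighted mean reduces to the plain mean of the sigmas, which makes a convenient numeric check (standalone sketch, no prepare_normals needed):

import numpy as np

sigmas = np.array([1.0, 3.0])
weights = np.array([0.5, 0.5])
correlation = 1.0
variance = np.sum(weights**2 * sigmas**2)  # diagonal terms
pairs = np.triu_indices(n=2, k=1)
variance += 2 * np.sum(correlation *
                       weights[pairs[0]] * weights[pairs[1]] *
                       sigmas[pairs[0]] * sigmas[pairs[1]])
print(np.sqrt(variance))  # 2.0, the mean of the sigmas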
Example 5
        mask, first, last = tracks[i].endpoints()
        last_vxy = tracks[i].vxyz[mask, last, 0:2]
        last_vxy_sigma = tracks[i].vxyz_sigma[mask, last, 0:2]
        vxy_motion_models = [copy.copy(model) for model in motion_models]
        if cylindrical:
            for j, model in enumerate(np.array(vxy_motion_models)[mask]):
                # Estimate cylindrical priors from cartesian results
                vxy = last_vxy[j] + last_vxy_sigma[j] * np.random.randn(100, 2)
                speed = np.hypot(vxy[:, 0], vxy[:, 1])
                vr, vr_sigma = speed.mean(), speed.std()
                thetas = np.arctan2(vxy[:, 1], vxy[:, 0])
                unit_yx = (
                    np.sin(thetas).mean(),
                    np.cos(thetas).mean())
                theta = np.arctan2(*unit_yx)
                theta_sigma = np.sqrt(-2 * np.log(np.hypot(*unit_yx)))
                model.vrthz = np.hstack((vr, theta, model.vrthz[2]))
                model.vrthz_sigma = np.hstack((vr_sigma, theta_sigma,
                    model.vrthz_sigma[2]))
        else:
            for j, model in enumerate(np.array(vxy_motion_models)[mask]):
                model.vxyz = np.hstack((last_vxy[j], model.vxyz[2]))
                model.vxyz_sigma = np.hstack((last_vxy_sigma[j], model.vxyz_sigma[2]))
        # Repeat track
        tracks[i + 1] = tracker.track(motion_models=vxy_motion_models,
            observer_mask=params['observer_mask'], tile_size=tile_size,
            parallel=parallel, datetimes=tracker.datetimes[::directions[i + 1]])
# ---- Clean up tracks ----
# Clean up tracker, since saved in Tracks.tracker
tracker.reset()
# Clear cached images, since saved in Tracks.tracker.observers
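The theta and theta_sigma computed above are the standard circular mean (direction of the mean resultant vector) and circular standard deviation sqrt(-2 * ln(R)); a minimal standalone sketch:

import numpy as np

thetas = np.radians([85, 90, 95, 100])
unit_yx = (np.sin(thetas).mean(), np.cos(thetas).mean())
theta = np.arctan2(*unit_yx)                            # circular mean
theta_sigma = np.sqrt(-2 * np.log(np.hypot(*unit_yx)))  # circular std
print(np.degrees(theta), np.degrees(theta_sigma))       # ~92.5, ~5.6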
Example 6
# few_obs = (nobservers < min_observers) | (dx != 0) | (dy != 0)
few_obs = (nobservers < min_observers)
# nobservers = nobservers.astype(float)
# nobservers[nobservers == 0] = np.nan
# few_obs |= (scipy.ndimage.minimum_filter(nobservers, size=(3, 3, 1)) < min_observers)
vx[few_obs] = np.nan
vy[few_obs] = np.nan
vz[few_obs] = np.nan
extension_x[few_obs] = np.nan
extension_y[few_obs] = np.nan
compression_x[few_obs] = np.nan
compression_y[few_obs] = np.nan
flotation[few_obs] = np.nan

# Compute speeds
speeds = np.sqrt(vx**2 + vy**2)

# ---- Animate speeds ----

i = 0
fig = matplotlib.pyplot.figure(tight_layout=True, figsize=(12, 8))
ax = matplotlib.pyplot.gca()
ax.axis('off')
ax.set_aspect(1)
im = ax.imshow(speeds[indices][..., i],
               vmin=0,
               vmax=20,
               extent=(raster.xlim[0], raster.xlim[1], raster.ylim[1],
                       raster.ylim[0]))
ax.set_xlim(ax.get_xlim())
ax.set_ylim(ax.get_ylim())
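Only the first frame is drawn above. One common way to animate the remaining time steps, continuing from the fig and im objects already created (a sketch assuming time is the last axis of speeds[indices] and ffmpeg is available for saving):

import matplotlib.animation

def update(k):
    # Swap the image data in place rather than redrawing the axes
    im.set_array(speeds[indices][..., k])
    return [im]

anim = matplotlib.animation.FuncAnimation(
    fig, update, frames=speeds[indices].shape[-1], blit=True)
anim.save('speeds.mp4', fps=10)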
Example 7
    dem = glimpse.Raster.read(path,
        xlim=dem_template.xlim + np.array((-1, 1)) * grid_size,
        ylim=dem_template.ylim + np.array((1, -1)) * grid_size,
        d=grid_size)
    dem.crop(zlim=zlim)
    z = dem.sample(dem_points, order=1, bounds_error=False).reshape(dem_template.shape)
    dem = glimpse.Raster(z, x=dem_template.xlim, y=dem_template.ylim, datetime=t)
    # Cache dem type and glacier polygon
    dem.type = demtype
    dem.polygon = cg.load_glacier_polygon(t=dem.datetime, demtype=dem.type)
    # Mask forebay
    forebay = cg.load_forebay_polygon(glacier=dem.polygon)
    mask = dem.rasterize_polygons([forebay])
    dem.Z[mask | base_mask] = np.nan
    dem.fill_crevasses(**fill_crevasses_args)
    dem.Z[mask | base_mask] = np.nan
    # Use base DEM outside glacier
    dem.Z[base_mask] = base_dem.Z[base_mask]
    dem.Z[mask] = np.nan
    # Add to results
    means.append(dem)
    sigma = np.sqrt(dem_sigmas[demtype]**2 + surface_sigma**2)
    sigmas.append(sigma)

# Initialize interpolant
dem_interpolant = glimpse.RasterInterpolant(means=means, sigmas=sigmas,
    x=[dem.datetime for dem in means])

# Write to file
glimpse.helpers.write_pickle(dem_interpolant, dem_interpolant_path)
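Reading the interpolant back and evaluating it would then look something like this (hypothetical usage: read_pickle is assumed to mirror write_pickle, and the interpolant is assumed to be callable at a datetime):

import datetime

# Hypothetical: assumes glimpse.helpers.read_pickle exists and that
# RasterInterpolant can be evaluated at an arbitrary datetime
dem_interpolant = glimpse.helpers.read_pickle(dem_interpolant_path)
dem = dem_interpolant(datetime.datetime(2013, 6, 1))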
Example 8
    fsigmas[mask, dim] = s[is_single_median]
    # Multiple median: Take unweighted average (correlation = 1)
    is_multiple_median = is_median & ~is_single_median
    mask = is_multiple_median.any(axis=1)
    fmeans[mask, dim] = medians.squeeze(axis=1)[mask]
    if exact_median_variance:
        # "Exact" variance
        n = s.shape[1]
        pairs = np.triu_indices(n=n, k=1)
        sqweights = 1 / np.sum(is_multiple_median, axis=1, keepdims=False)**2
        variances = sqweights * np.nansum(s**2, axis=1, keepdims=False)
        variances += 2 * sqweights * np.nansum(
            np.take(s, pairs[0], axis=1) * np.take(s, pairs[1], axis=1),
            axis=1,
            keepdims=False)
        fsigmas[mask, dim] = np.sqrt(variances[mask])
    else:
        # Approximate variance using the mean of the variances
        fsigmas[mask, dim] = np.sqrt(np.nanmean(s**2, axis=1)[mask])

# Write files
glimpse.helpers.write_pickle(ti, os.path.join(arrays_path, 'ti.pkl'))
glimpse.helpers.write_pickle(fmeans, os.path.join(arrays_path, 'means.pkl'))
glimpse.helpers.write_pickle(fsigmas, os.path.join(arrays_path, 'sigmas.pkl'))
nobservers[np.isnan(means[..., 0])] = 0
glimpse.helpers.write_pickle(nobservers,
                             os.path.join(arrays_path, 'nobservers.pkl'))
glimpse.helpers.write_pickle(flotation,
                             os.path.join(arrays_path, 'flotation.pkl'))
# (xyi)
xy = glimpse.helpers.grid_to_points((template.X, template.Y))[ids]
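For fully correlated variables, the "exact" branch in Example 8 reduces to sigma-of-mean = mean of the sigmas, while the approximate branch gives the root-mean-square of the sigmas, which is always at least as large. A scalar check (sketch with made-up sigmas):

import numpy as np

s = np.array([1.0, 3.0])  # sigmas of two tied medians
n = len(s)
pairs = np.triu_indices(n=n, k=1)
exact = np.sqrt((np.sum(s**2) +
                 2 * np.sum(s[pairs[0]] * s[pairs[1]])) / n**2)
approx = np.sqrt(np.mean(s**2))
print(exact, approx)  # 2.0 (mean of sigmas) vs ~2.24 (RMS)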