Example #1
import pyproj
import verde as vd

# We'll test this on the California vertical GPS velocity data because it comes with the
# uncertainties
data = vd.datasets.fetch_california_gps()
coordinates = (data.longitude.values, data.latitude.values)

# Use a Mercator projection for our Cartesian gridder
projection = pyproj.Proj(proj="merc", lat_ts=data.latitude.mean())

# Now we can chain a block weighted mean and weighted spline together. We'll use
# uncertainty propagation to calculate the new weights from block mean because our data
# vary smoothly but have different uncertainties.
spacing = 5 / 60  # 5 arc-minutes
chain = vd.Chain(
    [
        ("mean", vd.BlockMean(spacing=spacing * 111e3, uncertainty=True)),
        ("spline", vd.Spline(damping=1e-10)),
    ]
)
print(chain)

# Split the data into a training and testing set. We'll use the training set to grid the
# data and the testing set to validate our spline model. Weights need to be
# 1/uncertainty**2 for the error propagation in BlockMean to work.
train, test = vd.train_test_split(
    projection(*coordinates),
    data.velocity_up,
    weights=1 / data.std_up ** 2,
    random_state=0,
)
# Fit the model on the training set
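# (The steps below are a minimal sketch of the usual Verde workflow, added for
# completeness; they are not part of the original snippet.)
chain.fit(*train)

# Score the fitted spline on the testing set (R^2 of the predictions)
print("Cross-validation R^2 score:", chain.score(*test))

# Grid the vertical velocity on a regular geographic grid, projecting the grid
# coordinates on the fly to match the Cartesian coordinates used for fitting
grid = chain.grid(
    region=vd.get_region(coordinates),
    spacing=spacing,
    projection=projection,
    dims=["latitude", "longitude"],
    data_names=["velocity"],
)
print(grid)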
Example #2
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import verde as vd

# We'll test this on the California vertical GPS velocity data because it comes with the
# uncertainties
data = vd.datasets.fetch_california_gps()
coordinates = (data.longitude, data.latitude)

# We'll calculate the mean on large blocks to show the effect of the different weighting
# schemes
spacing = 30 / 60  # 30 arc-minutes
# It's important that the weights are given as 1/sigma**2 for the uncertainty
# propagation. In this case, you should not use verde.variance_to_weights because it
# would normalize the weights.
weights = 1 / data.std_up**2
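# A quick check of the point above (illustrative, not part of the original
# example): verde.variance_to_weights rescales its output, so the absolute
# 1/sigma**2 magnitudes required for uncertainty propagation would be lost.
scaled_weights = vd.variance_to_weights(data.std_up**2)
print(weights.max(), scaled_weights.max())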
reducer = vd.BlockMean(spacing, center_coordinates=True)
# First produce the weighted variance weights
variance_weights = reducer.filter(coordinates, data.velocity_up, weights)[-1]
# And now produce the uncertainty propagation weights
reducer.set_params(uncertainty=True)
block_coords, velocity, uncertainty_weights = reducer.filter(
    coordinates, data.velocity_up, weights
)

# Now we can plot the different weights side by side on Mercator maps
fig, axes = plt.subplots(
    1, 2, figsize=(13.5, 7), subplot_kw=dict(projection=ccrs.Mercator())
)
crs = ccrs.PlateCarree()
titles = ["Variance weights", "Uncertainty weights"]
weight_estimates = [variance_weights, uncertainty_weights]
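
# A minimal sketch of the plotting loop that would follow (an assumed
# continuation, not part of the original snippet): scatter the block
# coordinates coloured by the base-10 log of each weight estimate so the two
# schemes can be compared.
for ax, title, w in zip(axes, titles, weight_estimates):
    ax.set_title(title)
    pc = ax.scatter(
        *block_coords, c=np.log10(w), s=10, cmap="magma", transform=crs
    )
    plt.colorbar(pc, ax=ax, orientation="horizontal", pad=0.05).set_label(
        "log10(weight)"
    )
    ax.coastlines()
plt.show()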
Example #3

########################################################################################
# Weights in data decimation
# --------------------------
#
# :class:`~verde.BlockReduce` can't output weights for each data point because it
# doesn't know which reduction operation it's using. If you want to do a weighted
# interpolation, like :class:`verde.Spline`, :class:`~verde.BlockReduce` won't propagate
# the weights to the interpolation function. If your data are relatively smooth, you can
# use :class:`verde.BlockMean` instead to decimate the data and produce weights. It can
# calculate different kinds of weights, depending on configuration options and what you
# give it as input.
#
# Let's explore all of the possibilities.
mean = vd.BlockMean(spacing=15 / 60)
print(mean)
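
########################################################################################
# As a quick contrast (an illustrative aside, not part of the original tutorial;
# it assumes numpy is imported as ``np`` and that ``coordinates`` and ``data``
# hold the California GPS data loaded earlier, as in the previous example),
# :class:`~verde.BlockReduce` returns only blocked coordinates and data, with no
# weights to pass along to the gridder:
reduced_coords, reduced_data = vd.BlockReduce(np.median, spacing=15 / 60).filter(
    coordinates, data.velocity_up
)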

########################################################################################
# Option 1: No input weights
# ++++++++++++++++++++++++++
#
# In this case, we'll get a standard mean and the output weights will be 1 over the
# variance of the data in each block:
#
# .. math::
#
#     \bar{d} = \dfrac{\sum\limits_{i=1}^N d_i}{N}
#     \: , \qquad
#     \sigma^2 = \dfrac{\sum\limits_{i=1}^N (d_i - \bar{d})^2}{N}
#     \: , \qquad
#     w = \dfrac{1}{\sigma^2}
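
########################################################################################
# A minimal sketch of Option 1 in practice (an assumed illustration rather than
# the tutorial's own code): calling ``filter`` without input weights returns the
# block means together with 1/variance weights.
block_coords, block_mean, block_weights = mean.filter(coordinates, data.velocity_up)
print(block_weights)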