def test_values(
    distr: PiecewiseLinear,
    target: List[float],
    expected_target_cdf: List[float],
    expected_target_crps: List[float],
):
    target = mx.nd.array(target).reshape(shape=(len(target),))
    expected_target_cdf = np.array(expected_target_cdf).reshape(
        (len(expected_target_cdf),)
    )
    expected_target_crps = np.array(expected_target_crps).reshape(
        (len(expected_target_crps),)
    )

    assert all(np.isclose(distr.cdf(target).asnumpy(), expected_target_cdf))
    assert all(
        np.isclose(distr.crps(target).asnumpy(), expected_target_crps)
    )

    # compare with the empirical cdf obtained from samples
    num_samples = 100_000
    samples = distr.sample(num_samples).asnumpy()
    assert np.isfinite(samples).all()

    emp_cdf, edges = empirical_cdf(samples)
    calc_cdf = distr.cdf(mx.nd.array(edges)).asnumpy()
    assert np.allclose(calc_cdf[1:, :], emp_cdf, atol=1e-2)
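

# A minimal sketch of an empirical-CDF helper for one-dimensional samples.
# This is an illustration of the comparison above, not the actual
# gluonts.testutil.empirical_cdf implementation: np.histogram returns
# num_bins + 1 bin edges, so a CDF evaluated at the edges has one extra
# leading entry, which is why calc_cdf is sliced with [1:, :] above.
def _empirical_cdf_1d(samples: np.ndarray, num_bins: int = 100):
    counts, edges = np.histogram(samples, bins=num_bins)
    emp_cdf = np.cumsum(counts) / len(samples)  # CDF at the upper edges
    return emp_cdf, edges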


def test_simple_symmetric():
    gamma = mx.nd.array([-1.0])
    slopes = mx.nd.array([[2.0, 2.0]])
    knot_spacings = mx.nd.array([[0.5, 0.5]])

    distr = PiecewiseLinear(
        gamma=gamma, slopes=slopes, knot_spacings=knot_spacings
    )

    assert distr.cdf(mx.nd.array([-2.0])).asnumpy().item() == 0.0
    assert distr.cdf(mx.nd.array([+2.0])).asnumpy().item() == 1.0

    expected_crps = np.array([1.0 + 2.0 / 3.0])

    assert np.allclose(
        distr.crps(mx.nd.array([-2.0])).asnumpy(), expected_crps
    )
    assert np.allclose(
        distr.crps(mx.nd.array([2.0])).asnumpy(), expected_crps
    )
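

# Worked check for the constant above (a sketch added for clarity, not part
# of the original test): with gamma = -1, slopes = [2, 2] and
# knot_spacings = [0.5, 0.5], the quantile function is q(u) = -1 + 2u,
# i.e. the distribution is Uniform(-1, 1). For a target x = 2 outside the
# support,
#   CRPS(F, x) = int (F(y) - 1{y >= x})^2 dy
#              = int_{-1}^{1} ((y + 1) / 2)^2 dy + int_{1}^{2} 1 dy
#              = 2/3 + 1,
# matching expected_crps; by symmetry the same value holds at x = -2. The
# energy form CRPS(F, x) = E|X - x| - 0.5 * E|X - X'| gives the same number
# and can be estimated from draws (helper name is ours, for illustration):
def _approx_crps_uniform(x: float, num_draws: int = 100_000) -> float:
    rng = np.random.default_rng(0)
    xs = rng.uniform(-1.0, 1.0, size=num_draws)
    xs2 = rng.uniform(-1.0, 1.0, size=num_draws)
    return float(np.mean(np.abs(xs - x)) - 0.5 * np.mean(np.abs(xs - xs2)))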


def test_shapes(
    batch_shape: Tuple, num_pieces: int, num_samples: int, serialize_fn
):
    gamma = mx.nd.ones(shape=(*batch_shape,))
    slopes = mx.nd.ones(shape=(*batch_shape, num_pieces))  # all positive
    knot_spacings = (
        mx.nd.ones(shape=(*batch_shape, num_pieces)) / num_pieces
    )  # positive and sum to 1
    target = mx.nd.ones(shape=batch_shape)  # shape of gamma

    distr = PiecewiseLinear(
        gamma=gamma, slopes=slopes, knot_spacings=knot_spacings
    )
    distr = serialize_fn(distr)

    # assert that the parameters and target have proper shapes
    assert gamma.shape == target.shape
    assert knot_spacings.shape == slopes.shape
    assert len(gamma.shape) + 1 == len(knot_spacings.shape)

    # assert that batch_shape is computed properly
    assert distr.batch_shape == batch_shape

    # assert that shapes of original parameters are correct
    assert distr.b.shape == slopes.shape
    assert distr.knot_positions.shape == knot_spacings.shape

    # assert that the shape of crps is correct
    assert distr.crps(target).shape == batch_shape

    # assert that the quantile shape is correct when computing the quantile
    # values at the knot positions - used for a_tilde
    assert distr.quantile_internal(knot_spacings, axis=-2).shape == (
        *batch_shape,
        num_pieces,
    )

    # assert that the shapes of samples and quantile values are correct
    # when num_samples is None
    samples = distr.sample()
    assert samples.shape == batch_shape
    assert distr.quantile_internal(samples).shape == batch_shape

    # assert that the shapes of samples and quantile values are correct
    # when num_samples is not None
    samples = distr.sample(num_samples)
    assert samples.shape == (num_samples, *batch_shape)
    assert distr.quantile_internal(samples, axis=0).shape == (
        num_samples,
        *batch_shape,
    )
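

# Concrete instance of the shape bookkeeping above, using values assumed
# for illustration: with batch_shape = (3, 4, 5) and num_pieces = 10,
# gamma and target have shape (3, 4, 5), slopes and knot_spacings have
# shape (3, 4, 5, 10), distr.sample() has shape (3, 4, 5), and
# distr.sample(100) has shape (100, 3, 4, 5).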


def test_piecewise_linear(
    gamma: float,
    slopes: np.ndarray,
    knot_spacings: np.ndarray,
    hybridize: bool,
) -> None:
    '''
    Test that minimizing the CRPS recovers the quantile function.
    '''
    num_samples = 500  # keep the sample count small to avoid timeouts

    gammas = mx.nd.zeros((num_samples,)) + gamma
    slopess = mx.nd.zeros((num_samples, len(slopes))) + mx.nd.array(slopes)
    knot_spacingss = mx.nd.zeros(
        (num_samples, len(knot_spacings))
    ) + mx.nd.array(knot_spacings)

    pwl_sqf = PiecewiseLinear(gammas, slopess, knot_spacingss)

    samples = pwl_sqf.sample()

    # parameter initialization
    gamma_init = gamma - START_TOL_MULTIPLE * TOL * gamma
    slopes_init = slopes - START_TOL_MULTIPLE * TOL * slopes
    knot_spacings_init = knot_spacings
    # perturb the knot spacings such that they still sum to 1 afterwards
    mid = len(slopes) // 2
    knot_spacings_init[:mid] = (
        knot_spacings[:mid] - START_TOL_MULTIPLE * TOL * knot_spacings[:mid]
    )
    knot_spacings_init[mid:] = (
        knot_spacings[mid:] + START_TOL_MULTIPLE * TOL * knot_spacings[mid:]
    )

    init_biases = [gamma_init, slopes_init, knot_spacings_init]

    # check whether the fitted parameters recover the original ones
    gamma_hat, slopes_hat, knot_spacings_hat = maximum_likelihood_estimate_sgd(
        PiecewiseLinearOutput(len(slopes)),
        samples,
        init_biases=init_biases,
        hybridize=hybridize,
        learning_rate=PositiveFloat(0.01),
        num_epochs=PositiveInt(20),
    )

    # Since the problem is highly non-convex, we may not recover the exact
    # parameters; instead we check that the estimated parameters yield
    # similar function values at different quantile levels.
    quantile_levels = np.arange(0.1, 1.0, 0.1)

    # create a PiecewiseLinear instance with the estimated parameters
    # to have access to .quantile
    pwl_sqf_hat = PiecewiseLinear(
        mx.nd.array(gamma_hat),
        mx.nd.array(slopes_hat).expand_dims(axis=0),
        mx.nd.array(knot_spacings_hat).expand_dims(axis=0),
    )

    # compute quantiles with the estimated parameters
    quantiles_hat = np.squeeze(
        pwl_sqf_hat.quantile(
            mx.nd.array(quantile_levels).expand_dims(axis=0), axis=1
        ).asnumpy()
    )

    # compute quantiles with the original parameters; since the parameters
    # are replicated across samples, we take only the first entry
    quantiles = np.squeeze(
        pwl_sqf.quantile(
            mx.nd.array(quantile_levels)
            .expand_dims(axis=0)
            .repeat(axis=0, repeats=num_samples),
            axis=1,
        ).asnumpy()[0, :]
    )

    for ix, (quantile, quantile_hat) in enumerate(
        zip(quantiles, quantiles_hat)
    ):
        assert np.abs(quantile_hat - quantile) < TOL * quantile, (
            f"quantile level {quantile_levels[ix]} didn't match: "
            f"q = {quantile}, q_hat = {quantile_hat}"
        )
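

# For reference, a numpy sketch of the spline quantile function that
# PiecewiseLinear is understood to implement -- a hedged reconstruction
# from the parameters used in these tests, not the library code itself:
# q(u) = gamma + sum_l b_l * max(u - d_l, 0), where the increments b are
# differences of consecutive slopes (b_0 = m_0) and the knot positions d
# are cumulative sums of the knot spacings, with d_0 = 0.
def _quantile_sketch(u, gamma, slopes, knot_spacings):
    b = np.diff(slopes, prepend=0.0)  # slope increments
    d = np.concatenate([[0.0], np.cumsum(knot_spacings)[:-1]])  # knots
    return gamma + np.maximum(np.asarray(u)[:, None] - d, 0.0) @ b


# e.g. _quantile_sketch([0.5], 1.0, np.array([2.0, 3.0, 1.0]),
# np.array([0.3, 0.4, 0.3])) evaluates to [2.2], consistent with the first
# (target, cdf) pair in the parametrize block below.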


from typing import List, Tuple

import pytest

import mxnet as mx
import numpy as np

from gluonts.distribution import PiecewiseLinear
from gluonts.testutil import empirical_cdf


@pytest.mark.parametrize(
    "distr, target, expected_target_cdf, expected_target_crps",
    [
        (
            PiecewiseLinear(
                gamma=mx.nd.ones(shape=(1,)),
                slopes=mx.nd.array([2, 3, 1]).reshape(shape=(1, 3)),
                knot_spacings=mx.nd.array([0.3, 0.4, 0.3]).reshape(
                    shape=(1, 3)
                ),
            ),
            [2.2],
            [0.5],
            [0.223000],
        ),
        (
            PiecewiseLinear(
                gamma=mx.nd.ones(shape=(2,)),
                slopes=mx.nd.array([[1, 1], [1, 2]]).reshape(shape=(2, 2)),
                knot_spacings=mx.nd.array([[0.4, 0.6], [0.4, 0.6]]).reshape(
                    shape=(2, 2)
                ),
            ),
            [1.5, 1.6],
            [0.5, 0.5],

            ),
            (3, 4, 5),
            (),
        ),
        (
            Uniform(
                low=-mx.nd.ones(shape=(3, 4, 5)),
                high=mx.nd.ones(shape=(3, 4, 5)),
            ),
            (3, 4, 5),
            (),
        ),
        (
            PiecewiseLinear(
                gamma=mx.nd.ones(shape=(3, 4, 5)),
                slopes=mx.nd.ones(shape=(3, 4, 5, 10)),
                knot_spacings=mx.nd.ones(shape=(3, 4, 5, 10)) / 10,
            ),
            (3, 4, 5),
            (),
        ),
        (
            MixtureDistribution(
                mixture_probs=mx.nd.stack(
                    0.2 * mx.nd.ones(shape=(3, 1, 5)),
                    0.8 * mx.nd.ones(shape=(3, 1, 5)),
                    axis=-1,
                ),
                components=[
                    Gaussian(
                        mu=mx.nd.zeros(shape=(3, 4, 5)),