Example #1
import mxnet as mx

# Import path per recent GluonTS releases; older versions expose the same
# class under gluonts.distribution.piecewise_linear.
from gluonts.mx.distribution import PiecewiseLinearOutput


def test_robustness():
    distr_out = PiecewiseLinearOutput(num_pieces=10)
    args_proj = distr_out.get_args_proj()
    args_proj.initialize()

    net_out = mx.nd.random.normal(shape=(1000, 30), scale=1e2)
    gamma, slopes, knot_spacings = args_proj(net_out)
    distr = distr_out.distribution((gamma, slopes, knot_spacings))

    # compute the 1-quantile (the 0-quantile is gamma)
    sup_support = gamma + mx.nd.sum(slopes * knot_spacings, axis=-1)

    assert mx.nd.broadcast_lesser_equal(gamma, sup_support).asnumpy().all()

    width = sup_support - gamma
    x = mx.nd.random.uniform(low=gamma - width, high=sup_support + width)

    # check that 0 <= cdf <= 1
    cdf_x = distr.cdf(x)
    assert (mx.nd.min(cdf_x).asscalar() >= 0.0
            and mx.nd.max(cdf_x).asscalar() <= 1.0)

    # check that 0 <= crps
    crps_x = distr.crps(x)
    assert mx.nd.min(crps_x).asscalar() >= 0.0
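
The robustness check relies on the supremum of the support being the quantile function evaluated at level 1. Below is a minimal NumPy sketch of that spline parametrization, assuming the standard form q(u) = gamma + sum_l slopes[l] * clip(u - k_{l-1}, 0, spacings[l]) with knot positions k_l the cumulative sums of the spacings; `pwl_quantile` is an illustrative helper, not part of GluonTS.

import numpy as np

def pwl_quantile(u, gamma, slopes, knot_spacings):
    # q(0) = gamma and q(1) = gamma + sum(slopes * knot_spacings),
    # which is exactly the `sup_support` computed in the test above.
    knots = np.concatenate([[0.0], np.cumsum(knot_spacings)])
    segments = np.clip(
        u[:, None] - knots[None, :-1], 0.0, knot_spacings[None, :]
    )
    return gamma + (slopes[None, :] * segments).sum(axis=-1)

u = np.linspace(0.0, 1.0, 11)
q = pwl_quantile(
    u,
    gamma=1.0,
    slopes=np.array([0.5, 2.0, 1.0]),
    knot_spacings=np.array([0.3, 0.4, 0.3]),
)
assert np.isclose(q[0], 1.0)  # the 0-quantile is gamma
assert np.isclose(q[-1], 1.0 + 0.5 * 0.3 + 2.0 * 0.4 + 1.0 * 0.3)
assert np.all(np.diff(q) >= 0)  # non-negative slopes => monotone quantiles
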
Example #2
import mxnet as mx
import numpy as np

# Import path per recent GluonTS releases (older: gluonts.distribution.*).
from gluonts.mx.distribution import (
    FixedKnotsPiecewiseLinearOutput,
    PiecewiseLinearOutput,
)


def test_fkpwl_distr_output_same_as_pl():
    x = mx.nd.array([[0.1, 0.5], [0.3, 0.99], [0.2, 0.6]])

    gamma = mx.nd.array([0.0, 2.0])
    slopes = mx.nd.array([[0.1, 0.5, 0.9], [2.0, 0.1, 5.0]])
    knot_spacings = mx.nd.array([[0.1, 0.5, 0.4], [0.1, 0.5, 0.4]])

    pl_output = PiecewiseLinearOutput(num_pieces=3)
    pl_dist = pl_output.distribution([gamma, slopes, knot_spacings])

    fkpl = FixedKnotsPiecewiseLinearOutput([0.1, 0.6]).distribution(
        [gamma, slopes, knot_spacings]
    )

    assert np.allclose(pl_dist.crps(x).asnumpy(), fkpl.crps(x).asnumpy())
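
The fixed knots [0.1, 0.6] and the knot spacings used in the test are consistent by construction: interior knots partition the unit interval, and the spacings are the consecutive differences. A small standalone NumPy check of that relationship:

import numpy as np

# Interior knots [0.1, 0.6] split [0, 1] into pieces whose widths are exactly
# the knot_spacings rows used above.
spacings = np.diff(np.concatenate([[0.0], [0.1, 0.6], [1.0]]))
assert np.allclose(spacings, [0.1, 0.5, 0.4])
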
Example #3
# In the GluonTS test suite this function is driven by @pytest.mark.parametrize
# over (gamma, slopes, knot_spacings, hybridize). The decorator, the TOL and
# START_TOL_MULTIPLE constants, and the maximum_likelihood_estimate_sgd helper
# are defined elsewhere in the test module and are not part of this excerpt;
# PositiveFloat and PositiveInt are pydantic types.
def test_piecewise_linear(
    gamma: float,
    slopes: np.ndarray,
    knot_spacings: np.ndarray,
    hybridize: bool,
) -> None:
    """
    Test to check that minimizing the CRPS recovers the quantile function
    """
    num_samples = 500  # use a few samples for timeout failure

    gammas = mx.nd.zeros((num_samples, )) + gamma
    slopess = mx.nd.zeros((num_samples, len(slopes))) + mx.nd.array(slopes)
    knot_spacingss = mx.nd.zeros(
        (num_samples, len(knot_spacings))) + mx.nd.array(knot_spacings)

    pwl_sqf = PiecewiseLinear(gammas, slopess, knot_spacingss)

    samples = pwl_sqf.sample()

    # Parameter initialization
    gamma_init = gamma - START_TOL_MULTIPLE * TOL * gamma
    slopes_init = slopes - START_TOL_MULTIPLE * TOL * slopes
    knot_spacings_init = knot_spacings.copy()  # avoid mutating the input array
    # Perturb the two halves of the knot spacings in opposite directions so
    # that their sum stays (approximately) 1.
    mid = len(slopes) // 2
    knot_spacings_init[:mid] = (knot_spacings[:mid] -
                                START_TOL_MULTIPLE * TOL * knot_spacings[:mid])
    knot_spacings_init[mid:] = (knot_spacings[mid:] +
                                START_TOL_MULTIPLE * TOL * knot_spacings[mid:])

    init_biases = [gamma_init, slopes_init, knot_spacings_init]

    # check whether SGD-based estimation recovers the original parameters
    gamma_hat, slopes_hat, knot_spacings_hat = maximum_likelihood_estimate_sgd(
        PiecewiseLinearOutput(len(slopes)),
        samples,
        init_biases=init_biases,
        hybridize=hybridize,
        learning_rate=PositiveFloat(0.01),
        num_epochs=PositiveInt(20),
    )

    # Since the problem is highly non-convex, we may not recover the exact
    # parameters. Instead, check that the estimated parameters yield similar
    # quantile-function evaluations at several quantile levels.
    quantile_levels = np.arange(0.1, 1.0, 0.1)

    # create a PiecewiseLinear instance with the estimated parameters to have access to .quantile
    pwl_sqf_hat = PiecewiseLinear(
        mx.nd.array(gamma_hat),
        mx.nd.array(slopes_hat).expand_dims(axis=0),
        mx.nd.array(knot_spacings_hat).expand_dims(axis=0),
    )

    # Compute quantiles with the estimated parameters
    quantiles_hat = np.squeeze(
        pwl_sqf_hat.quantile_internal(
            mx.nd.array(quantile_levels).expand_dims(axis=0), axis=1
        ).asnumpy()
    )

    # Compute quantiles with the original parameters.
    # Since the parameters are replicated across samples, take only the first entry.
    quantiles = np.squeeze(
        pwl_sqf.quantile_internal(
            mx.nd.array(quantile_levels)
            .expand_dims(axis=0)
            .repeat(axis=0, repeats=num_samples),
            axis=1,
        ).asnumpy()[0, :]
    )

    for ix, (quantile, quantile_hat) in enumerate(
        zip(quantiles, quantiles_hat)
    ):
        assert np.abs(quantile_hat - quantile) < TOL * quantile, (
            f"quantile level {quantile_levels[ix]} didn't match: "
            f"q = {quantile}, q_hat = {quantile_hat}"
        )
Example #4
# Fragment of a @pytest.mark.parametrize case list for a shape-checking test.
# Each case is (distr_output, data, loc choices, scale choices,
# expected_batch_shape, expected_event_shape); the opening of the list and the
# DistributionOutput of the first case are truncated in the source.
(
    # ... (DistributionOutput for this case truncated in the source)
    mx.nd.random.normal(shape=(3, 4, 5, 6)),
    [None],
    [None, mx.nd.ones(shape=(3, 4, 5))],
    (3, 4, 5),
    (),
),
(
    UniformOutput(),
    mx.nd.random.normal(shape=(3, 4, 5, 6)),
    [None, mx.nd.ones(shape=(3, 4, 5))],
    [None, mx.nd.ones(shape=(3, 4, 5))],
    (3, 4, 5),
    (),
),
(
    PiecewiseLinearOutput(num_pieces=3),
    mx.nd.random.normal(shape=(3, 4, 5, 6)),
    [None, mx.nd.ones(shape=(3, 4, 5))],
    [None, mx.nd.ones(shape=(3, 4, 5))],
    (3, 4, 5),
    (),
),
(
    MixtureDistributionOutput([GaussianOutput(), StudentTOutput()]),
    mx.nd.random.normal(shape=(3, 4, 5, 6)),
    [None, mx.nd.ones(shape=(3, 4, 5))],
    [None, mx.nd.ones(shape=(3, 4, 5))],
    (3, 4, 5),
    (),
),
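
These tuples presumably feed a shape-checking test whose body is not part of this excerpt. A hedged sketch of the kind of assertions involved, assuming the test iterates over the loc/scale choices in each case (check_shapes is an illustrative name):

def check_shapes(distr_output, data, loc, scale, batch_shape, event_shape):
    # Build the distribution from projected arguments, optionally shifted by
    # loc and scaled by scale, and verify its shape metadata.
    args_proj = distr_output.get_args_proj()
    args_proj.initialize()
    distr = distr_output.distribution(args_proj(data), loc=loc, scale=scale)
    assert distr.batch_shape == batch_shape
    assert distr.event_shape == event_shape
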