Example #1
    def test_bad_size(self):
        # Sizes with a zero or negative dimension must be rejected.
        R = MRG_RandomStream(234)

        for size in [
            (0, 100),
            (-1, 100),
            (1, 0),
        ]:
            with pytest.raises(ValueError):
                R.uniform(size)
            with pytest.raises(ValueError):
                R.binomial(size)
            with pytest.raises(ValueError):
                R.multinomial(size, 1, [])
            with pytest.raises(ValueError):
                R.normal(size)
            with pytest.raises(ValueError):
                R.truncated_normal(size)
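These snippets come from the test suite for Aesara's MRG_RandomStream and omit the module-level setup. Below is a minimal sketch of the imports they assume; the module paths follow Aesara's layout and may differ across versions, and check_basics is a helper defined locally in the test module, not a library function.

import sys

import numpy as np
import pytest

from aesara import config, function, grad
from aesara.gradient import NullTypeGradError
from aesara.sandbox import rng_mrg
from aesara.sandbox.rng_mrg import MRG_RandomStream
from aesara.tensor import as_tensor_variable, matrix, scalar
from aesara.tensor.math import sum as at_sum
from aesara.tensor.random.utils import RandomStream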
Example #2
def test_target_parameter():
    srng = MRG_RandomStream()
    pvals = np.array([[0.98, 0.01, 0.01], [0.01, 0.49, 0.50]])

    def basic_target_parameter_test(x):
        f = function([], x)
        assert isinstance(f(), np.ndarray)

    basic_target_parameter_test(srng.uniform((3, 2), target="cpu"))
    basic_target_parameter_test(srng.normal((3, 2), target="cpu"))
    basic_target_parameter_test(srng.truncated_normal((3, 2), target="cpu"))
    basic_target_parameter_test(srng.binomial((3, 2), target="cpu"))
    basic_target_parameter_test(
        srng.multinomial(pvals=pvals.astype("float32"), target="cpu"))
    basic_target_parameter_test(
        srng.choice(p=pvals.astype("float32"), replace=False, target="cpu"))
    basic_target_parameter_test(
        srng.multinomial_wo_replacement(pvals=pvals.astype("float32"),
                                        target="cpu"))
Example #3
def test_normal_truncation():
    # Same as test_normal0, with an extra check that samples stay within the truncation bounds.
    steps = 50
    std = 2.0
    # standard deviation is slightly less than for a regular Gaussian
    # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
    target_std = 0.87962566103423978 * std

    if (config.mode in ("DEBUG_MODE", "DebugMode", "FAST_COMPILE")
            or config.mode == "Mode" and config.linker in ["py"]):
        sample_size = (25, 30)
        default_rtol = 0.02
    else:
        sample_size = (999, 50)
        default_rtol = 0.01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = matrix()

    test_cases = [
        (sample_size, sample_size, [], [], -5.0, default_rtol, default_rtol),
        (
            x.shape,
            sample_size,
            [x],
            [np.zeros(sample_size, dtype=config.floatX)],
            -5.0,
            default_rtol,
            default_rtol,
        ),
        # test odd value
        (
            x.shape,
            sample_size_odd,
            [x],
            [np.zeros(sample_size_odd, dtype=config.floatX)],
            -5.0,
            default_rtol,
            default_rtol,
        ),
        (
            sample_size,
            sample_size,
            [],
            [],
            np.arange(np.prod(sample_size),
                      dtype="float32").reshape(sample_size),
            10.0 * std / np.sqrt(steps),
            default_rtol,
        ),
        # test empty size (scalar)
        ((), (), [], [], -5.0, default_rtol, 0.02),
        # test with few samples at the same time
        ((1, ), (1, ), [], [], -5.0, default_rtol, 0.02),
        ((3, ), (3, ), [], [], -5.0, default_rtol, 0.02),
    ]

    for size, const_size, var_input, input, avg, rtol, std_tol in test_cases:
        R = MRG_RandomStream(234)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.normal(
            size=size,
            avg=avg,
            std=std,
            truncate=True,
            nstreams=rng_mrg.guess_n_streams(size, warn=False),
        )
        f = function(var_input, n)

        # check if truncated at 2*std
        samples = f(*input)
        assert np.all(avg + 2 * std - samples >= 0), "bad upper bound? {} {}".format(
            samples, avg + 2 * std
        )
        assert np.all(samples - (avg - 2 * std) >= 0), "bad lower bound? {} {}".format(
            samples, avg - 2 * std
        )

        # Increase the number of steps if size implies only a few samples
        if np.prod(const_size) < 10:
            steps_ = steps * 50
        else:
            steps_ = steps
        check_basics(
            f,
            steps_,
            const_size,
            target_avg=avg,
            target_std=target_std,
            prefix="mrg ",
            allow_01=True,
            inputs=input,
            mean_rtol=rtol,
            std_tol=std_tol,
        )

        sys.stdout.flush()
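The target_std constant used above is simply the standard deviation of a standard normal truncated to [-2, 2], scaled by std. A quick check with SciPy (not part of the test; assumes scipy is installed):

from scipy.stats import truncnorm

# Standard deviation of N(0, 1) restricted to [-2, 2].
print(truncnorm.std(a=-2, b=2, loc=0.0, scale=1.0))  # ~0.8796256610342398

# Scaled by the test's std of 2.0, this is the spread the truncated samples should show.
print(0.87962566103423978 * 2.0)  # ~1.759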
Example #4
def test_normal0():
    steps = 50
    std = 2.0
    if (config.mode in ("DEBUG_MODE", "DebugMode", "FAST_COMPILE")
            or config.mode == "Mode" and config.linker in ["py"]):
        sample_size = (25, 30)
        default_rtol = 0.02
    else:
        sample_size = (999, 50)
        default_rtol = 0.01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = matrix()

    test_cases = [
        (sample_size, sample_size, [], [], -5.0, default_rtol, default_rtol),
        (
            x.shape,
            sample_size,
            [x],
            [np.zeros(sample_size, dtype=config.floatX)],
            -5.0,
            default_rtol,
            default_rtol,
        ),
        # test odd value
        (
            x.shape,
            sample_size_odd,
            [x],
            [np.zeros(sample_size_odd, dtype=config.floatX)],
            -5.0,
            default_rtol,
            default_rtol,
        ),
        (
            sample_size,
            sample_size,
            [],
            [],
            np.arange(np.prod(sample_size),
                      dtype="float32").reshape(sample_size),
            10.0 * std / np.sqrt(steps),
            default_rtol,
        ),
        # test empty size (scalar)
        ((), (), [], [], -5.0, default_rtol, 0.02),
        # test with few samples at the same time
        ((1, ), (1, ), [], [], -5.0, default_rtol, 0.02),
        ((3, ), (3, ), [], [], -5.0, default_rtol, 0.02),
    ]

    for size, const_size, var_input, input, avg, rtol, std_tol in test_cases:
        R = MRG_RandomStream(234)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.normal(
            size=size,
            avg=avg,
            std=std,
            nstreams=rng_mrg.guess_n_streams(size, warn=False),
        )
        f = function(var_input, n)
        f(*input)

        # Increase the number of steps if size implies only a few samples
        if np.prod(const_size) < 10:
            steps_ = steps * 50
        else:
            steps_ = steps
        check_basics(
            f,
            steps_,
            const_size,
            target_avg=avg,
            target_std=std,
            prefix="mrg ",
            allow_01=True,
            inputs=input,
            mean_rtol=rtol,
            std_tol=std_tol,
        )

        sys.stdout.flush()

        RR = RandomStream(235)

        nn = RR.normal(avg, std, size=size)
        ff = function(var_input, nn)

        check_basics(
            ff,
            steps_,
            const_size,
            target_avg=avg,
            target_std=std,
            prefix="numpy ",
            allow_01=True,
            inputs=input,
            mean_rtol=rtol,
        )
Example #5
def test_undefined_grad():
    srng = MRG_RandomStream(seed=1234)

    # checking uniform distribution
    low = scalar()
    out = srng.uniform((), low=low)
    with pytest.raises(NullTypeGradError):
        grad(out, low)

    high = scalar()
    out = srng.uniform((), low=0, high=high)
    with pytest.raises(NullTypeGradError):
        grad(out, high)

    out = srng.uniform((), low=low, high=high)
    with pytest.raises(NullTypeGradError):
        grad(out, (low, high))

    # checking binomial distribution
    prob = scalar()
    out = srng.binomial((), p=prob)
    with pytest.raises(NullTypeGradError):
        grad(out, prob)

    # checking multinomial distribution
    prob1 = scalar()
    prob2 = scalar()
    p = [as_tensor_variable([prob1, 0.5, 0.25])]
    out = srng.multinomial(size=None, pvals=p, n=4)[0]
    with pytest.raises(NullTypeGradError):
        grad(at_sum(out), prob1)

    p = [as_tensor_variable([prob1, prob2])]
    out = srng.multinomial(size=None, pvals=p, n=4)[0]
    with pytest.raises(NullTypeGradError):
        grad(at_sum(out), (prob1, prob2))

    # checking choice
    p = [as_tensor_variable([prob1, prob2, 0.1, 0.2])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    with pytest.raises(NullTypeGradError):
        grad(out[0], (prob1, prob2))

    p = [as_tensor_variable([prob1, prob2])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    with pytest.raises(NullTypeGradError):
        grad(out[0], (prob1, prob2))

    p = [as_tensor_variable([prob1, 0.2, 0.3])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    with pytest.raises(NullTypeGradError):
        grad(out[0], prob1)

    # checking normal distribution
    avg = scalar()
    out = srng.normal((), avg=avg)
    with pytest.raises(NullTypeGradError):
        grad(out, avg)

    std = scalar()
    out = srng.normal((), avg=0, std=std)
    with pytest.raises(NullTypeGradError):
        grad(out, std)

    out = srng.normal((), avg=avg, std=std)
    with pytest.raises(NullTypeGradError):
        grad(out, (avg, std))

    # checking truncated normal distribution
    avg = scalar()
    out = srng.truncated_normal((), avg=avg)
    with pytest.raises(NullTypeGradError):
        grad(out, avg)

    std = scalar()
    out = srng.truncated_normal((), avg=0, std=std)
    with pytest.raises(NullTypeGradError):
        grad(out, std)

    out = srng.truncated_normal((), avg=avg, std=std)
    with pytest.raises(NullTypeGradError):
        grad(out, (avg, std))
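Every branch above checks the same property: the MRG sampling ops expose no gradient with respect to their distribution parameters, so differentiating through a draw raises NullTypeGradError. A minimal standalone reproduction, again assuming the imports listed under Example #1:

srng = MRG_RandomStream(seed=1234)
low = scalar()
sample = srng.uniform((), low=low)

try:
    grad(sample, low)
except NullTypeGradError:
    print("gradient of a random draw w.r.t. `low` is undefined, as expected")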