Example No. 1
    def test_bad_size(self):
        # Shapes with a zero or negative dimension must be rejected.
        R = MRG_RandomStreams(234)

        for size in [
            (0, 100),
            (-1, 100),
            (1, 0),
        ]:
            with pytest.raises(ValueError):
                R.uniform(size)
            with pytest.raises(ValueError):
                R.binomial(size)
            with pytest.raises(ValueError):
                R.multinomial(size, 1, [])
            with pytest.raises(ValueError):
                R.normal(size)
            with pytest.raises(ValueError):
                R.truncated_normal(size)
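These snippets are shown without their module-level imports. Assuming they come from Theano's MRG random-stream test suite, the setup presumably looks like the sketch below (exact module paths may differ between Theano versions, and Example No. 7 additionally relies on an assert_raises helper, presumably from numpy.testing):

import sys

import numpy as np
import pytest
import theano
from theano import config, tensor
from theano.sandbox import rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStreams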
Example No. 2
def test_truncated_normal():
    # just a copy of test_normal0 for truncated normal
    steps = 50
    std = 2.

    if (config.mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
            config.mode == 'Mode' and config.linker in ['py']):
        sample_size = (25, 30)
        default_rtol = .02
    else:
        sample_size = (999, 50)
        default_rtol = .01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = tensor.matrix()

    test_cases = [
        (sample_size, sample_size, [], [], -5., default_rtol, default_rtol),
        (x.shape, sample_size, [x],
         [np.zeros(sample_size, dtype=config.floatX)],
         -5., default_rtol, default_rtol),
        # test odd value
        (x.shape, sample_size_odd, [x],
         [np.zeros(sample_size_odd, dtype=config.floatX)],
         -5., default_rtol, default_rtol),
        (sample_size, sample_size, [], [],
         np.arange(np.prod(sample_size),
                   dtype='float32').reshape(sample_size),
         10. * std / np.sqrt(steps), default_rtol),
        # test empty size (scalar)
        ((), (), [], [], -5., default_rtol, 0.02),
        # test with few samples at the same time
        ((1,), (1,), [], [], -5., default_rtol, 0.02),
        ((3,), (3,), [], [], -5., default_rtol, 0.02),
    ]

    for size, const_size, var_input, input, avg, rtol, std_tol in test_cases:
        R = MRG_RandomStreams(234)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.truncated_normal(size=size, avg=avg, std=std,
                               nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, n)

        # Increase the number of steps if size implies only a few samples
        if np.prod(const_size) < 10:
            steps_ = steps * 60
        else:
            steps_ = steps
        basictest(f, steps_, const_size, target_avg=avg, target_std=std,
                  prefix='mrg ', allow_01=True, inputs=input,
                  mean_rtol=rtol, std_tol=std_tol)

        sys.stdout.flush()
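The basictest helper (named check_basics in Example No. 6) is not part of the snippet. Judging only from how it is called, a minimal hypothetical stand-in presumably pools several batches from the compiled sampler and compares the empirical mean and standard deviation with the targets, roughly like this (the real helper presumably also validates the output shape against sample_size):

def basictest(f, steps, sample_size, target_avg=0.0, target_std=1.0,
              prefix='', allow_01=False, inputs=None,
              mean_rtol=0.01, std_tol=0.01):
    # Hypothetical stand-in: draw `steps` batches from the compiled sampler
    # `f` and compare pooled statistics with the requested targets.
    inputs = inputs if inputs is not None else []
    data = np.concatenate([np.atleast_1d(f(*inputs)).ravel()
                           for _ in range(steps)])
    if not allow_01:
        assert np.all((data >= 0) & (data <= 1)), prefix + 'samples outside [0, 1]'
    assert np.isclose(data.mean(), np.mean(target_avg), rtol=mean_rtol), \
        prefix + 'empirical mean is off target'
    assert np.isclose(data.std(), target_std, rtol=std_tol), \
        prefix + 'empirical std is off target'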
Example No. 3
def test_target_parameter():
    srng = MRG_RandomStreams()
    pvals = np.array([[.98, .01, .01], [.01, .49, .50]])

    def basic_target_parameter_test(x):
        f = theano.function([], x)
        assert isinstance(f(), np.ndarray)

    basic_target_parameter_test(srng.uniform((3, 2), target='cpu'))
    basic_target_parameter_test(srng.normal((3, 2), target='cpu'))
    basic_target_parameter_test(srng.truncated_normal((3, 2), target='cpu'))
    basic_target_parameter_test(srng.binomial((3, 2), target='cpu'))
    basic_target_parameter_test(srng.multinomial(pvals=pvals.astype('float32'), target='cpu'))
    basic_target_parameter_test(srng.choice(p=pvals.astype('float32'), replace=False, target='cpu'))
    basic_target_parameter_test(srng.multinomial_wo_replacement(pvals=pvals.astype('float32'), target='cpu'))
Example No. 4
def test_target_parameter():
    srng = MRG_RandomStreams()
    pvals = np.array([[0.98, 0.01, 0.01], [0.01, 0.49, 0.50]])

    def basic_target_parameter_test(x):
        f = theano.function([], x)
        assert isinstance(f(), np.ndarray)

    basic_target_parameter_test(srng.uniform((3, 2), target="cpu"))
    basic_target_parameter_test(srng.normal((3, 2), target="cpu"))
    basic_target_parameter_test(srng.truncated_normal((3, 2), target="cpu"))
    basic_target_parameter_test(srng.binomial((3, 2), target="cpu"))
    basic_target_parameter_test(
        srng.multinomial(pvals=pvals.astype("float32"), target="cpu"))
    basic_target_parameter_test(
        srng.choice(p=pvals.astype("float32"), replace=False, target="cpu"))
    basic_target_parameter_test(
        srng.multinomial_wo_replacement(pvals=pvals.astype("float32"),
                                        target="cpu"))
Example No. 5
def test_undefined_grad():
    srng = MRG_RandomStreams(seed=1234)

    # checking uniform distribution
    low = tensor.scalar()
    out = srng.uniform((), low=low)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, low)

    high = tensor.scalar()
    out = srng.uniform((), low=0, high=high)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, high)

    out = srng.uniform((), low=low, high=high)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, (low, high))

    # checking binomial distribution
    prob = tensor.scalar()
    out = srng.binomial((), p=prob)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, prob)

    # checking multinomial distribution
    prob1 = tensor.scalar()
    prob2 = tensor.scalar()
    p = [theano.tensor.as_tensor_variable([prob1, 0.5, 0.25])]
    out = srng.multinomial(size=None, pvals=p, n=4)[0]
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(theano.tensor.sum(out), prob1)

    p = [theano.tensor.as_tensor_variable([prob1, prob2])]
    out = srng.multinomial(size=None, pvals=p, n=4)[0]
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(theano.tensor.sum(out), (prob1, prob2))

    # checking choice
    p = [theano.tensor.as_tensor_variable([prob1, prob2, 0.1, 0.2])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out[0], (prob1, prob2))

    p = [theano.tensor.as_tensor_variable([prob1, prob2])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out[0], (prob1, prob2))

    p = [theano.tensor.as_tensor_variable([prob1, 0.2, 0.3])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out[0], prob1)

    # checking normal distribution
    avg = tensor.scalar()
    out = srng.normal((), avg=avg)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, avg)

    std = tensor.scalar()
    out = srng.normal((), avg=0, std=std)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, std)

    out = srng.normal((), avg=avg, std=std)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, (avg, std))

    # checking truncated normal distribution
    avg = tensor.scalar()
    out = srng.truncated_normal((), avg=avg)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, avg)

    std = tensor.scalar()
    out = srng.truncated_normal((), avg=0, std=std)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, std)

    out = srng.truncated_normal((), avg=avg, std=std)
    with pytest.raises(theano.gradient.NullTypeGradError):
        theano.grad(out, (avg, std))
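For contrast with the NullTypeGradError cases above: when the distribution parameters enter the graph deterministically, outside the random op, Theano can differentiate through them. This is not part of the listed tests, just a small sketch of the usual reparameterization trick under the same assumed imports:

def reparameterized_normal_grad():
    # avg and std are applied to a parameter-free standard-normal draw,
    # so theano.grad sees them on an ordinary deterministic path.
    srng = MRG_RandomStreams(seed=1234)
    avg = tensor.scalar()
    std = tensor.scalar()
    z = srng.normal(())            # standard normal, no symbolic parameters
    out = avg + std * z            # reparameterized sample
    g_avg, g_std = theano.grad(out, [avg, std])
    f = theano.function([avg, std], [out, g_avg, g_std])
    return f(0.0, 2.0)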
Example No. 6
def test_truncated_normal():
    # just a copy of test_normal0 for truncated normal
    steps = 50
    std = 2.0

    if (config.mode in ["DEBUG_MODE", "DebugMode", "FAST_COMPILE"]
            or config.mode == "Mode" and config.linker in ["py"]):
        sample_size = (25, 30)
        default_rtol = 0.02
    else:
        sample_size = (999, 50)
        default_rtol = 0.01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = tensor.matrix()

    test_cases = [
        (sample_size, sample_size, [], [], -5.0, default_rtol, default_rtol),
        (
            x.shape,
            sample_size,
            [x],
            [np.zeros(sample_size, dtype=config.floatX)],
            -5.0,
            default_rtol,
            default_rtol,
        ),
        # test odd value
        (
            x.shape,
            sample_size_odd,
            [x],
            [np.zeros(sample_size_odd, dtype=config.floatX)],
            -5.0,
            default_rtol,
            default_rtol,
        ),
        (
            sample_size,
            sample_size,
            [],
            [],
            np.arange(np.prod(sample_size),
                      dtype="float32").reshape(sample_size),
            10.0 * std / np.sqrt(steps),
            default_rtol,
        ),
        # test empty size (scalar)
        ((), (), [], [], -5.0, default_rtol, 0.02),
        # test with few samples at the same time
        ((1, ), (1, ), [], [], -5.0, default_rtol, 0.02),
        ((3, ), (3, ), [], [], -5.0, default_rtol, 0.02),
    ]

    for size, const_size, var_input, input, avg, rtol, std_tol in test_cases:
        R = MRG_RandomStreams(234)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.truncated_normal(
            size=size,
            avg=avg,
            std=std,
            nstreams=rng_mrg.guess_n_streams(size, warn=False),
        )
        f = theano.function(var_input, n)

        # Increase the number of steps if size implies only a few samples
        if np.prod(const_size) < 10:
            steps_ = steps * 60
        else:
            steps_ = steps
        check_basics(
            f,
            steps_,
            const_size,
            target_avg=avg,
            target_std=std,
            prefix="mrg ",
            allow_01=True,
            inputs=input,
            mean_rtol=rtol,
            std_tol=std_tol,
        )

        sys.stdout.flush()
Example No. 7
def test_undefined_grad():
    srng = MRG_RandomStreams(seed=1234)

    # checking uniform distribution
    low = tensor.scalar()
    out = srng.uniform((), low=low)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, low)

    high = tensor.scalar()
    out = srng.uniform((), low=0, high=high)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, high)

    out = srng.uniform((), low=low, high=high)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out,
                  (low, high))

    # checking binomial distribution
    prob = tensor.scalar()
    out = srng.binomial((), p=prob)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, prob)

    # checking multinomial distribution
    prob1 = tensor.scalar()
    prob2 = tensor.scalar()
    p = [theano.tensor.as_tensor_variable([prob1, 0.5, 0.25])]
    out = srng.multinomial(size=None, pvals=p, n=4)[0]
    assert_raises(theano.gradient.NullTypeGradError, theano.grad,
                  theano.tensor.sum(out), prob1)

    p = [theano.tensor.as_tensor_variable([prob1, prob2])]
    out = srng.multinomial(size=None, pvals=p, n=4)[0]
    assert_raises(theano.gradient.NullTypeGradError, theano.grad,
                  theano.tensor.sum(out), (prob1, prob2))

    # checking choice
    p = [theano.tensor.as_tensor_variable([prob1, prob2, 0.1, 0.2])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out[0],
                  (prob1, prob2))

    p = [theano.tensor.as_tensor_variable([prob1, prob2])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out[0],
                  (prob1, prob2))

    p = [theano.tensor.as_tensor_variable([prob1, 0.2, 0.3])]
    out = srng.choice(a=None, size=1, p=p, replace=False)[0]
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out[0],
                  prob1)

    # checking normal distribution
    avg = tensor.scalar()
    out = srng.normal((), avg=avg)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, avg)

    std = tensor.scalar()
    out = srng.normal((), avg=0, std=std)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, std)

    out = srng.normal((), avg=avg, std=std)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out,
                  (avg, std))

    # checking truncated normal distribution
    avg = tensor.scalar()
    out = srng.truncated_normal((), avg=avg)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, avg)

    std = tensor.scalar()
    out = srng.truncated_normal((), avg=0, std=std)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, std)

    out = srng.truncated_normal((), avg=avg, std=std)
    assert_raises(theano.gradient.NullTypeGradError, theano.grad, out,
                  (avg, std))