Example #1
def test_multinomial_dtypes():
    p = tensor.dmatrix()
    u = tensor.dvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float64', m.dtype

    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float32', m.dtype

    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('float64')(p, u)
    assert m.dtype == 'float64', m.dtype
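
The op's selection rule is easier to see outside of Theano. Below is a rough numpy-only sketch of the idea (not the actual MultinomialFromUniform implementation): for each row of probabilities and one uniform draw per row, the chosen category is the first one whose cumulative probability exceeds the draw. The helper name multinomial_from_uniform is made up for illustration.

import numpy

def multinomial_from_uniform(p, u):
    # p: (rows, categories) probabilities; u: one uniform draw per row.
    out = numpy.zeros_like(p)
    for i, (row, ui) in enumerate(zip(p, u)):
        # First category whose cumulative probability exceeds the draw.
        out[i, numpy.searchsorted(numpy.cumsum(row), ui, side='right')] = 1
    return out

print(multinomial_from_uniform(numpy.array([[.2, .8], [.3, .7]]),
                               numpy.array([.31, .31])))
# [[0. 1.]
#  [0. 1.]]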
Example #2
    def body(mode, gpu):
        p = tensor.fmatrix()
        u = tensor.fvector()
        m = multinomial.MultinomialFromUniform('auto')(p, u)
        f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)
        if gpu:
            assert any([
                type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()
            ])

        pval = numpy.arange(10000 * 4, dtype='float32').reshape(
            (10000, 4)) + 0.1
        pval = pval / pval.sum(axis=1)[:, None]
        uval = numpy.ones_like(pval[:, 0]) * 0.5
        mval = f(pval, uval)

        assert mval.shape == pval.shape
        if config.cast_policy == 'custom':
            assert mval.dtype == pval.dtype
        elif config.cast_policy == 'numpy+floatX':
            assert mval.dtype == config.floatX
        elif config.cast_policy == 'numpy':
            assert mval.dtype == 'float64'
        else:
            raise NotImplementedError(config.cast_policy)
        assert numpy.allclose(mval.sum(axis=1), 2)
        expected = numpy.asarray([0, 0, 2, 0]) + 0 * pval  # broadcast over all rows
        assert numpy.allclose(mval, expected)
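
To see why every row of mval is expected to equal [0, 0, 2, 0] (category index 2 selected, then doubled by the m * 2): each row of pval puts slightly more mass on later categories, so with a uniform draw of 0.5 the cumulative probability always crosses 0.5 at the third category. A quick numpy check of that claim, reusing the same pval construction:

import numpy

pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
csum = numpy.cumsum(pval, axis=1)
# In every row the first two categories stay below 0.5 and the third crosses
# it, so a uniform draw of 0.5 always selects category index 2.
assert (csum[:, 1] < 0.5).all() and (csum[:, 2] > 0.5).all()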
Example #3
    def multinomial(self,
                    size=None,
                    n=1,
                    pvals=None,
                    ndim=None,
                    dtype='int64',
                    nstreams=None):
        """
        Sample `n` (currently `n` needs to be 1) times from a multinomial
        distribution defined by probabilities pvals.

        Example: pvals = [[.98, .01, .01], [.01, .98, .01]] will probably
        result in [[1, 0, 0], [0, 1, 0]].

        .. note::
            `size` and `ndim` are only there to keep the same signature as the
            other methods (uniform, binomial, normal, etc.).
            TODO: adapt multinomial to take them into account.
        """
        if pvals is None:
            raise TypeError("You have to specify pvals")
        pvals = as_tensor_variable(pvals)
        if n == 1 and pvals.ndim == 2:
            ndim, size, bcast = raw_random._infer_ndim_bcast(
                ndim, size, pvals[:, 0])
            assert ndim == 1
            bcast = bcast + (pvals.type.broadcastable[-1], )
            unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
            op = multinomial.MultinomialFromUniform(dtype)
            return op(pvals, unis)
        else:
            raise NotImplementedError(
                ("MRG_RandomStreams.multinomial only"
                 " implemented with n == 1 and pvals.ndim = 2"))
Example #4
    def multinomial(self,
                    size=None,
                    n=1,
                    pvals=None,
                    ndim=None,
                    dtype='int64',
                    nstreams=None):
        """
        Sample `n` (currently `n` needs to be 1) times from a multinomial
        distribution defined by probabilities pvals.

        Example: pvals = [[.98, .01, .01], [.01, .98, .01]] will
        probably result in [[1,0,0],[0,1,0]].

        .. note::
            -`size` and `ndim` are only there to keep the same signature as the
            other methods (uniform, binomial, normal, etc.).
            TODO: adapt multinomial to take them into account.

            -Does not do any value checking on pvals, i.e. there is no
             check that the elements are non-negative, less than 1, or
             sum to 1. Passing pvals = [[-2., 2.]] will result in
             sampling [[0, 0]].
        """
        if pvals is None:
            raise TypeError("You have to specify pvals")
        pvals = as_tensor_variable(pvals)
        if size is not None:
            if any([isinstance(i, int) and i <= 0 for i in size]):
                raise ValueError(
                    "The specified size contains a dimension with value <= 0",
                    size)

        if n == 1 and pvals.ndim == 2:
            if size is not None:
                raise ValueError(
                    "Provided a size argument to "
                    "MRG_RandomStreams.multinomial, which does not use "
                    "the size argument.")
            if ndim is not None:
                raise ValueError(
                    "Provided an ndim argument to "
                    "MRG_RandomStreams.multinomial, which does not use "
                    "the ndim argument.")
            ndim, size, bcast = raw_random._infer_ndim_bcast(
                ndim, size, pvals[:, 0])
            assert ndim == 1
            bcast = bcast + (pvals.type.broadcastable[-1], )
            unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
            op = multinomial.MultinomialFromUniform(dtype)
            return op(pvals, unis)
        else:
            raise NotImplementedError(
                ("MRG_RandomStreams.multinomial only"
                 " implemented with n == 1 and pvals.ndim = 2"))
Example #5
def test_gpu_opt():
    if not cuda.cuda_available:
        # Skip test if cuda_ndarray is not available.
        from nose.plugins.skip import SkipTest
        raise SkipTest('Optional package cuda not available')

    # Test that the op is placed on the GPU when its output is
    # moved to the GPU.
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([p, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
    assert any([
        type(node.op) is multinomial.GpuMultinomialFromUniform
        for node in f.maker.fgraph.toposort()
    ])
    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    # Test with a row; this case was failing in the past.
    r = tensor.frow()
    m = multinomial.MultinomialFromUniform('auto')(r, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([r, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
    assert any([
        type(node.op) is multinomial.GpuMultinomialFromUniform
        for node in f.maker.fgraph.toposort()
    ])
    pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval2 = f(pval, uval)
Example #6
def test_multinomial_0():
    # This tests the MultinomialFromUniform Op directly, not going through the
    # multinomial() call in GPU random generation.

    p = tensor.fmatrix()
    u = tensor.fvector()

    m = multinomial.MultinomialFromUniform('auto')(p, u)

    def body(mode, gpu):
        # the m * 2 allows the multinomial to reuse its output
        f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)
        if gpu:
            assert any([
                type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()
            ])

        # test that both first and second samples can be drawn
        assert numpy.allclose(f([[1, 0], [0, 1]], [.1, .1]), [[2, 0], [0, 2]])

        # test that the second label can be drawn in both rows
        r = f([[.2, .8], [.3, .7]], [.31, .31])
        assert numpy.allclose(r, [[0, 2], [0, 2]]), r

        # test that the first label can also be drawn
        r = f([[.2, .8], [.3, .7]], [.21, .21])
        assert numpy.allclose(r, [[0, 2], [2, 0]]), r

        # change the size to make sure the output gets reallocated correctly,
        # and also make sure that the GPU version doesn't mix up the
        # transposed layout
        r = f([[.2, .8]], [.25])
        assert numpy.allclose(r, [[0, 2]]), r

    run_with_c(body)
    if cuda.cuda_available:
        run_with_c(body, True)