Code example #1
File: test_rng_mrg.py Project: wjbianjason/Theano
def test_consistency_GPU_parallel():
    """
    Verify that the random numbers generated by GPU_mrg_uniform, in
    parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.

    """
    if not cuda_available:
        raise SkipTest('Optional package cuda not available')
    if config.mode == 'FAST_COMPILE':
        mode = 'FAST_RUN'
    else:
        mode = config.mode

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7  # 7 samples will be drawn in parallel

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_samples = []
        rstate = [curr_rstate.copy()]
        for j in range(1, n_substreams):
            rstate.append(rng_mrg.ff_2p72(rstate[-1]))
        rstate = numpy.asarray(rstate).flatten()
        # HACK - transfer these int32 to the GPU memory as float32
        # (reinterpret_cast)
        tmp_float_buf = numpy.frombuffer(rstate.data, dtype='float32')
        # Transfer to device
        rstate = float32_shared_constructor(tmp_float_buf)

        new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate,
                                                         ndim=None,
                                                         dtype='float32',
                                                         size=(n_substreams, ))
        rstate.default_update = new_rstate

        # Not really necessary, just mimicking
        # rng_mrg.MRG_RandomStreams' behavior
        sample.rstate = rstate
        sample.update = (rstate, new_rstate)

        # We need the sample back in the main memory
        cpu_sample = tensor.as_tensor_variable(sample)
        f = theano.function([], cpu_sample, mode=mode)

        for k in range(n_samples):
            s = f()
            stream_samples.append(s)

        samples.append(numpy.array(stream_samples).T.flatten())

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
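
The stream/substream bookkeeping above follows L'Ecuyer's design: `ff_2p72` fast-forwards an MRG31k3p state by 2**72 draws (one substream) and `ff_2p134` by 2**134 draws (one stream), so the twelve streams and seven substreams can never overlap. A minimal sketch of how these helpers compose on their own, using only functions the test already relies on:

import numpy
from theano.sandbox import rng_mrg

state = numpy.array([12345] * 6, dtype='int32')  # MRG31k3p state: two 3-vectors
next_substream = rng_mrg.ff_2p72(state)   # jump ahead 2**72 draws within the stream
next_stream = rng_mrg.ff_2p134(state)     # jump ahead 2**134 draws to the next stream
# Each stream thus spans 2**134 draws, i.e. 2**62 substreams of 2**72 draws each.
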
Code example #2
File: test_rng_mrg.py Project: 5730279821-TA/Theano
def test_overflow_gpu_new_backend():
    # run with THEANO_FLAGS=mode=FAST_RUN,init_gpu_device=cuda1,device=cpu
    from theano.sandbox.gpuarray.tests.test_basic_ops import \
        mode_with_gpu as mode
    from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
    seed = 12345
    n_substreams = 7
    curr_rstate = numpy.array([seed] * 6, dtype='int32')
    rstate = [curr_rstate.copy()]
    for j in range(1, n_substreams):
        rstate.append(rng_mrg.ff_2p72(rstate[-1]))
    rstate = numpy.asarray(rstate)
    rstate = gpuarray_shared_constructor(rstate)
    fct = functools.partial(rng_mrg.GPUA_mrg_uniform.new, rstate,
                            ndim=None, dtype='float32')
    # should raise error as the size overflows
    sizes = [(2**31, ), (2**32, ), (2**15, 2**16,), (2, 2**15, 2**15)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=True)
    # should not raise error
    sizes = [(2**5, ), (2**5, 2**5), (2**5, 2**5, 2**5)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
    # should support int32 sizes
    sizes = [(numpy.int32(2**10), ),
             (numpy.int32(2), numpy.int32(2**10), numpy.int32(2**10))]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
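
`rng_mrg_overflow` is a helper defined elsewhere in test_rng_mrg.py and not shown on this page. A plausible reconstruction that matches the call sites above (an assumption, not the verbatim helper; the exact exception type is also assumed):

import theano

def rng_mrg_overflow(sizes, fct, mode, should_raise_error):
    # Hypothetical sketch: compile one sampling graph per requested size and
    # check that sizes whose element count exceeds the int32 range are rejected.
    for size in sizes:
        if should_raise_error:
            try:
                theano.function([], fct(size=size)[1], mode=mode)()
                raise AssertionError("size %s should have overflowed" % (size,))
            except ValueError:
                pass  # overflow rejected, as expected (assumed to be a ValueError)
        else:
            theano.function([], fct(size=size)[1], mode=mode)()  # must run cleanly

The `[1]` relies on the fact, visible in the examples above, that `GPUA_mrg_uniform.new` returns the pair `(new_rstate, sample)`.
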
Code example #3
def test_consistency_cpu_serial():
    '''Verify that the random numbers generated by mrg_uniform, serially,
    are the same as the reference (Java) implementation by L'Ecuyer et al.
    '''
    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_rstate = curr_rstate.copy()
        for j in range(n_substreams):
            rstate = theano.shared(numpy.array([stream_rstate.copy()], dtype='int32'))
            new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype=config.floatX, size=(1,))
            # Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior
            sample.rstate = rstate
            sample.update = (rstate, new_rstate)

            rstate.default_update = new_rstate
            f = theano.function([], sample)
            for k in range(n_samples):
                s = f()
                samples.append(s)

            # next substream
            stream_rstate = rng_mrg.ff_2p72(stream_rstate)

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
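
All of the rstate plumbing above exists only so the test can pin down exact sample values; application code would normally go through `MRG_RandomStreams`, which creates and updates these shared state variables internally. A minimal usage sketch:

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=12345)
u = srng.uniform(size=(7,))        # symbolic batch of 7 draws from U(0, 1)
f = theano.function([], u)
print(f())  # fresh samples on every call; the state update is implicit
print(f())
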
Code example #4
File: test_rng_mrg.py Project: truell20/grammarVAE
def test_overflow_gpu_new_backend():
    # run with THEANO_FLAGS=mode=FAST_RUN,init_gpu_device=cuda1,device=cpu
    from theano.sandbox.gpuarray.tests.test_basic_ops import \
        mode_with_gpu as mode
    from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
    seed = 12345
    n_substreams = 7
    curr_rstate = numpy.array([seed] * 6, dtype='int32')
    rstate = [curr_rstate.copy()]
    for j in range(1, n_substreams):
        rstate.append(rng_mrg.ff_2p72(rstate[-1]))
    rstate = numpy.asarray(rstate)
    rstate = gpuarray_shared_constructor(rstate)
    fct = functools.partial(rng_mrg.GPUA_mrg_uniform.new,
                            rstate,
                            ndim=None,
                            dtype='float32')
    # should raise error as the size overflows
    sizes = [(2**31, ), (2**32, ), (2**15, 2**16), (2, 2**15, 2**15)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=True)
    # should not raise error
    sizes = [(2**5, ), (2**5, 2**5), (2**5, 2**5, 2**5)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
    # should support int32 sizes
    sizes = [(numpy.int32(2**10), ),
             (numpy.int32(2), numpy.int32(2**10), numpy.int32(2**10))]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
Code example #5
File: test_rng_mrg.py Project: zmdfwh/Theano
def test_overflow_gpu_new_backend():
    seed = 12345
    n_substreams = 7
    curr_rstate = np.array([seed] * 6, dtype='int32')
    rstate = [curr_rstate.copy()]
    for j in range(1, n_substreams):
        rstate.append(rng_mrg.ff_2p72(rstate[-1]))
    rstate = np.asarray(rstate)
    rstate = gpuarray_shared_constructor(rstate)
    fct = functools.partial(GPUA_mrg_uniform.new,
                            rstate,
                            ndim=None,
                            dtype='float32')
    # should raise error as the size overflows
    sizes = [(2**31, ), (2**32, ), (2**15, 2**16), (2, 2**15, 2**15)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=True)
    # should not raise error
    sizes = [(2**5, ), (2**5, 2**5), (2**5, 2**5, 2**5)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
    # should support int32 sizes
    sizes = [(np.int32(2**10), ),
             (np.int32(2), np.int32(2**10), np.int32(2**10))]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
Code example #6
File: test_rng_mrg.py Project: ALISCIFP/Segmentation
def test_consistency_GPU_parallel():
    """
    Verify that the random numbers generated by GPU_mrg_uniform, in
    parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.

    """
    if not cuda_available:
        raise SkipTest('Optional package cuda not available')
    if config.mode == 'FAST_COMPILE':
        mode = 'FAST_RUN'
    else:
        mode = config.mode

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7  # 7 samples will be drawn in parallel

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_samples = []
        rstate = [curr_rstate.copy()]
        for j in range(1, n_substreams):
            rstate.append(rng_mrg.ff_2p72(rstate[-1]))
        rstate = numpy.asarray(rstate).flatten()
        # HACK - transfer these int32 to the GPU memory as float32
        # (reinterpret_cast)
        tmp_float_buf = numpy.frombuffer(rstate.data, dtype='float32')
        # Transfer to device
        rstate = float32_shared_constructor(tmp_float_buf)

        new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None,
                                                         dtype='float32',
                                                         size=(n_substreams,))
        rstate.default_update = new_rstate

        # Not really necessary, just mimicking
        # rng_mrg.MRG_RandomStreams' behavior
        sample.rstate = rstate
        sample.update = (rstate, new_rstate)

        # We need the sample back in the main memory
        cpu_sample = tensor.as_tensor_variable(sample)
        f = theano.function([], cpu_sample, mode=mode)

        for k in range(n_samples):
            s = f()
            stream_samples.append(s)

        samples.append(numpy.array(stream_samples).T.flatten())

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
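
The `.T.flatten()` at the end of each parallel test is what lines the samples up with the serial reference: each call to `f` draws one value from all seven substreams at once, so `stream_samples` is laid out rounds-by-substreams, while `java_samples` is substream-major. A standalone NumPy illustration of that reindexing:

import numpy

rounds = numpy.arange(35).reshape(5, 7)  # 5 sampling rounds x 7 parallel substreams
print(rounds.T.flatten())                # substream-major order, as in the serial tests
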
Code example #7
File: test_rng_mrg.py Project: wjbianjason/Theano
def test_consistency_GPUA_serial():
    """
    Verify that the random numbers generated by GPUA_mrg_uniform, serially,
    are the same as the reference (Java) implementation by L'Ecuyer et al.

    """
    from theano.sandbox.gpuarray.tests.test_basic_ops import \
        mode_with_gpu as mode
    from theano.sandbox.gpuarray.type import gpuarray_shared_constructor

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_rstate = curr_rstate.copy()
        for j in range(n_substreams):
            substream_rstate = numpy.array([stream_rstate.copy()],
                                           dtype='int32')
            # Transfer to device
            rstate = gpuarray_shared_constructor(substream_rstate)

            new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate,
                                                              ndim=None,
                                                              dtype='float32',
                                                              size=(1, ))
            rstate.default_update = new_rstate

            # Not really necessary, just mimicking
            # rng_mrg.MRG_RandomStreams' behavior
            sample.rstate = rstate
            sample.update = (rstate, new_rstate)

            # We need the sample back in the main memory
            cpu_sample = tensor.as_tensor_variable(sample)
            f = theano.function([], cpu_sample, mode=mode)
            for k in range(n_samples):
                s = f()
                samples.append(s)

            # next substream
            stream_rstate = rng_mrg.ff_2p72(stream_rstate)

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
Code example #8
File: test_rng_mrg.py Project: ALISCIFP/Segmentation
def test_consistency_GPUA_parallel():
    """
    Verify that the random numbers generated by GPUA_mrg_uniform, in
    parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.

    """
    from theano.sandbox.gpuarray.tests.test_basic_ops import \
        mode_with_gpu as mode
    from theano.sandbox.gpuarray.type import gpuarray_shared_constructor

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7  # 7 samples will be drawn in parallel

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_samples = []
        rstate = [curr_rstate.copy()]
        for j in range(1, n_substreams):
            rstate.append(rng_mrg.ff_2p72(rstate[-1]))
        rstate = numpy.asarray(rstate)
        rstate = gpuarray_shared_constructor(rstate)

        new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate, ndim=None,
                                                          dtype='float32',
                                                          size=(n_substreams,))
        rstate.default_update = new_rstate

        # Not really necessary, just mimicking
        # rng_mrg.MRG_RandomStreams' behavior
        sample.rstate = rstate
        sample.update = (rstate, new_rstate)

        # We need the sample back in the main memory
        cpu_sample = tensor.as_tensor_variable(sample)
        f = theano.function([], cpu_sample, mode=mode)

        for k in range(n_samples):
            s = f()
            stream_samples.append(s)

        samples.append(numpy.array(stream_samples).T.flatten())

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
Code example #9
File: test_rng_mrg.py Project: hamelphi/Theano
def test_consistency_GPU_serial():
    """Verify that the random numbers generated by GPU_mrg_uniform, serially,
    are the same as the reference (Java) implementation by L'Ecuyer et al.
    """
    if not cuda_available:
        raise SkipTest("Optional package cuda not available")
    if config.mode == "FAST_COMPILE":
        mode = "FAST_RUN"
    else:
        mode = config.mode

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype="int32")

    for i in range(n_streams):
        stream_rstate = curr_rstate.copy()
        for j in range(n_substreams):
            substream_rstate = numpy.array(stream_rstate.copy(), dtype="int32")
            # HACK - we transfer these int32 to the GPU memory as float32
            # (reinterpret_cast)
            tmp_float_buf = numpy.frombuffer(substream_rstate.data, dtype="float32")
            rstate = float32_shared_constructor(tmp_float_buf)  # Transfer to device

            new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None, dtype="float32", size=(1,))
            rstate.default_update = new_rstate

            # Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior
            sample.rstate = rstate
            sample.update = (rstate, new_rstate)

            # We need the sample back in the main memory
            cpu_sample = tensor.as_tensor_variable(sample)
            f = theano.function([], cpu_sample, mode=mode)
            for k in range(n_samples):
                s = f()
                samples.append(s)

            # next substream
            stream_rstate = rng_mrg.ff_2p72(stream_rstate)

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
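
The int32-as-float32 "HACK" appears in the old CUDA-backend tests because `float32_shared_constructor` can only hold float32 data, so the six int32 state words are shipped to the device bit-for-bit under a different dtype. The round trip is easy to verify in pure NumPy (a standalone illustration, not part of the test):

import numpy

state = numpy.array([12345] * 6, dtype='int32')
as_float = numpy.frombuffer(state.data, dtype='float32')  # same 24 bytes, reinterpreted
restored = as_float.view('int32')                         # bit-exact round trip
assert (restored == state).all()
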
Code example #10
File: test_rng_mrg.py Project: yukoba/Theano
def test_consistency_GPUA_parallel():
    # Verify that the random numbers generated by GPUA_mrg_uniform, in
    # parallel, are the same as the reference (Java) implementation by
    # L'Ecuyer et al.
    from theano.gpuarray.tests.config import mode_with_gpu as mode
    from theano.gpuarray.type import gpuarray_shared_constructor

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7  # 7 samples will be drawn in parallel

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_samples = []
        rstate = [curr_rstate.copy()]
        for j in range(1, n_substreams):
            rstate.append(rng_mrg.ff_2p72(rstate[-1]))
        rstate = numpy.asarray(rstate)
        rstate = gpuarray_shared_constructor(rstate)

        new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate, ndim=None,
                                                          dtype='float32',
                                                          size=(n_substreams,))
        rstate.default_update = new_rstate

        # Not really necessary, just mimicking
        # rng_mrg.MRG_RandomStreams' behavior
        sample.rstate = rstate
        sample.update = (rstate, new_rstate)

        # We need the sample back in the main memory
        cpu_sample = tensor.as_tensor_variable(sample)
        f = theano.function([], cpu_sample, mode=mode)

        for k in range(n_samples):
            s = f()
            stream_samples.append(s)

        samples.append(numpy.array(stream_samples).T.flatten())

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
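
Note the import paths: this example and the next import from `theano.gpuarray.*`, whereas the earlier GPUA examples (#7, #8) import from `theano.sandbox.gpuarray.*`; the gpuarray backend was promoted out of the sandbox in later Theano releases. A version-tolerant import (an illustration, not taken from the test file) would be:

try:
    from theano.gpuarray.type import gpuarray_shared_constructor
except ImportError:  # older Theano: the backend still lives in the sandbox
    from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
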
Code example #11
File: test_rng_mrg.py Project: ChinaQuants/Theano
def test_consistency_GPUA_serial():
    # Verify that the random numbers generated by GPUA_mrg_uniform, serially,
    # are the same as the reference (Java) implementation by L'Ecuyer et al.
    from theano.gpuarray.tests.config import mode_with_gpu as mode
    from theano.gpuarray.type import gpuarray_shared_constructor

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_rstate = curr_rstate.copy()
        for j in range(n_substreams):
            substream_rstate = numpy.array([stream_rstate.copy()],
                                           dtype='int32')
            # Transfer to device
            rstate = gpuarray_shared_constructor(substream_rstate)

            new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate,
                                                              ndim=None,
                                                              dtype='float32',
                                                              size=(1,))
            rstate.default_update = new_rstate

            # Not really necessary, just mimicking
            # rng_mrg.MRG_RandomStreams' behavior
            sample.rstate = rstate
            sample.update = (rstate, new_rstate)

            # We need the sample back in the main memory
            cpu_sample = tensor.as_tensor_variable(sample)
            f = theano.function([], cpu_sample, mode=mode)
            for k in range(n_samples):
                s = f()
                samples.append(s)

            # next substream
            stream_rstate = rng_mrg.ff_2p72(stream_rstate)

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
Code example #12
File: test_rng_mrg.py Project: wjbianjason/Theano
def test_consistency_cpu_parallel():
    """
    Verify that the random numbers generated by mrg_uniform, in parallel,
    are the same as the reference (Java) implementation by L'Ecuyer et al.

    """
    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7  # 7 samples will be drawn in parallel

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_samples = []
        rstate = [curr_rstate.copy()]
        for j in range(1, n_substreams):
            rstate.append(rng_mrg.ff_2p72(rstate[-1]))
        rstate = numpy.asarray(rstate)
        rstate = theano.shared(rstate)

        new_rstate, sample = rng_mrg.mrg_uniform.new(rstate,
                                                     ndim=None,
                                                     dtype=config.floatX,
                                                     size=(n_substreams, ))
        # Not really necessary, just mimicking
        # rng_mrg.MRG_RandomStreams' behavior
        sample.rstate = rstate
        sample.update = (rstate, new_rstate)

        rstate.default_update = new_rstate
        f = theano.function([], sample)

        for k in range(n_samples):
            s = f()
            stream_samples.append(s)

        samples.append(numpy.array(stream_samples).T.flatten())

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert numpy.allclose(samples, java_samples)
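
Setting `rstate.default_update = new_rstate` is what makes repeated calls to `f` advance the generator: when a shared variable carries a `default_update` attribute, `theano.function` applies that update implicitly even though no `updates=` argument is passed. The mechanism in isolation:

import numpy
import theano

counter = theano.shared(numpy.int64(0))
counter.default_update = counter + 1  # applied automatically on every call
f = theano.function([], counter)
print(f(), f(), f())  # 0 1 2 -- the shared state advances implicitly
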
Code example #13
File: test_rng_mrg.py Project: athiwatp/Theano
def test_consistency_cpu_parallel():
    """
    Verify that the random numbers generated by mrg_uniform, in parallel,
    are the same as the reference (Java) implementation by L'Ecuyer et al.

    """
    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7  # 7 samples will be drawn in parallel

    samples = []
    curr_rstate = np.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_samples = []
        rstate = [curr_rstate.copy()]
        for j in range(1, n_substreams):
            rstate.append(rng_mrg.ff_2p72(rstate[-1]))
        rstate = np.asarray(rstate)
        rstate = theano.shared(rstate)

        new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
                                                     dtype=config.floatX,
                                                     size=(n_substreams,))
        # Not really necessary, just mimicking
        # rng_mrg.MRG_RandomStreams' behavior
        sample.rstate = rstate
        sample.update = (rstate, new_rstate)

        rstate.default_update = new_rstate
        f = theano.function([], sample)

        for k in range(n_samples):
            s = f()
            stream_samples.append(s)

        samples.append(np.array(stream_samples).T.flatten())

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = np.array(samples).flatten()
    assert np.allclose(samples, java_samples)
Code example #14
File: test_rng_mrg.py Project: Thrandis/Theano
def test_overflow_gpu_new_backend():
    seed = 12345
    n_substreams = 7
    curr_rstate = np.array([seed] * 6, dtype='int32')
    rstate = [curr_rstate.copy()]
    for j in range(1, n_substreams):
        rstate.append(rng_mrg.ff_2p72(rstate[-1]))
    rstate = np.asarray(rstate)
    rstate = gpuarray_shared_constructor(rstate)
    fct = functools.partial(GPUA_mrg_uniform.new, rstate,
                            ndim=None, dtype='float32')
    # should raise error as the size overflows
    sizes = [(2**31, ), (2**32, ), (2**15, 2**16,), (2, 2**15, 2**15)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=True)
    # should not raise error
    sizes = [(2**5, ), (2**5, 2**5), (2**5, 2**5, 2**5)]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
    # should support int32 sizes
    sizes = [(np.int32(2**10), ),
             (np.int32(2), np.int32(2**10), np.int32(2**10))]
    rng_mrg_overflow(sizes, fct, mode, should_raise_error=False)
Code example #15
def test_consistency_cpu_serial():
    # Verify that the random numbers generated by mrg_uniform, serially,
    # are the same as the reference (Java) implementation by L'Ecuyer et al.

    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7

    samples = []
    curr_rstate = np.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_rstate = curr_rstate.copy()
        for j in range(n_substreams):
            rstate = theano.shared(
                np.array([stream_rstate.copy()], dtype='int32'))
            new_rstate, sample = rng_mrg.mrg_uniform.new(rstate,
                                                         ndim=None,
                                                         dtype=config.floatX,
                                                         size=(1, ))
            # Not really necessary, just mimicking
            # rng_mrg.MRG_RandomStreams' behavior
            sample.rstate = rstate
            sample.update = (rstate, new_rstate)

            rstate.default_update = new_rstate
            f = theano.function([], sample)
            for k in range(n_samples):
                s = f()
                samples.append(s)

            # next substream
            stream_rstate = rng_mrg.ff_2p72(stream_rstate)

        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = np.array(samples).flatten()
    assert np.allclose(samples, java_samples)