Example #1
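These snippets are fragments of a single DALI test module, so their imports and helpers are implied rather than shown. A preamble along the following lines is assumed; the helper-module name is a placeholder, and utils, Iterable, SampleCallbackBatched, SampleCallbackIterator, collect_iterations and raises all come from the surrounding test suite:

import numpy as np
import torch
import nvidia.dali as dali
# The helper names below live in the DALI test utilities; the module
# name used here is a placeholder, not the actual path.
import external_source_test_utils as utils  # hypothetical
from external_source_test_utils import (  # hypothetical
    Iterable, SampleCallbackBatched, SampleCallbackIterator,
    collect_iterations, raises)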
def _test_cycle_multiple_iterators(batch_size, iters_num, py_num_workers,
                                   reader_queue_sizes, cycle_policies,
                                   epoch_sizes):
    @dali.pipeline_def(batch_size=batch_size,
                       num_threads=4,
                       device_id=None,
                       py_num_workers=py_num_workers,
                       py_start_method='spawn')
    def pipeline(sample_cb, iter_1, iter_2, parallel):
        if parallel:
            queue_size_0, queue_size_1, queue_size_2 = reader_queue_sizes
        else:
            queue_size_0, queue_size_1, queue_size_2 = None, None, None
        cycle_1, cycle_2 = cycle_policies
        sample_out = dali.fn.external_source(source=sample_cb,
                                             parallel=parallel,
                                             batch=False,
                                             prefetch_queue_depth=queue_size_0)
        iter1_out = dali.fn.external_source(source=iter_1,
                                            parallel=parallel,
                                            batch=True,
                                            prefetch_queue_depth=queue_size_1,
                                            cycle=cycle_1)
        iter2_out = dali.fn.external_source(source=iter_2,
                                            parallel=parallel,
                                            batch=True,
                                            prefetch_queue_depth=queue_size_2,
                                            cycle=cycle_2)
        return (sample_out, iter1_out, iter2_out)

    shape = (2, 3)
    sample_epoch_size, iter_1_epoch_size, iter_2_epoch_size = epoch_sizes
    sample_cb = utils.ExtCallback((4, 5), sample_epoch_size * batch_size,
                                  np.int32)
    iter_1 = Iterable(batch_size,
                      shape,
                      epoch_size=iter_1_epoch_size,
                      dtype=np.int32)
    iter_2 = Iterable(batch_size,
                      shape,
                      epoch_size=iter_2_epoch_size,
                      dtype=np.int32)
    pipe_parallel = pipeline(sample_cb, iter_1, iter_2, parallel=True)
    pipe_seq = pipeline(sample_cb, iter_1, iter_2, parallel=False)
    pipe_parallel.build()
    utils.capture_processes(pipe_parallel._py_pool)
    pipe_seq.build()
    parallel_outs = collect_iterations(pipe_parallel, iters_num)
    seq_outs = collect_iterations(pipe_seq, iters_num)
    assert len(parallel_outs) == len(seq_outs)
    for parallel_out, seq_out in zip(parallel_outs, seq_outs):
        if parallel_out == StopIteration or seq_out == StopIteration:
            assert parallel_out == seq_out
            continue
        assert len(parallel_out) == len(seq_out) == 3
        for batch_parallel, batch_seq in zip(parallel_out, seq_out):
            assert len(batch_parallel) == len(batch_seq) == batch_size
            for sample_parallel, sample_seq in zip(batch_parallel, batch_seq):
                np.testing.assert_equal(np.array(sample_parallel),
                                        np.array(sample_seq))
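A minimal invocation sketch for this helper; the argument values below are illustrative only ("quiet" and "raise" are the cycle policies DALI's external_source accepts besides True/False):

_test_cycle_multiple_iterators(batch_size=4,
                               iters_num=10,
                               py_num_workers=2,
                               reader_queue_sizes=(2, 2, 2),
                               cycle_policies=("quiet", "raise"),
                               epoch_sizes=(3, 4, 5))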
Example #2
def test_parallel_fork_cpu_only():
    pipeline_pairs = 4
    batch_size = 10
    iters = 40
    callback = utils.ExtCallback((4, 5), iters * batch_size, np.int32)
    parallel_pipes = [(utils.create_pipe(callback,
                                         'cpu',
                                         batch_size,
                                         py_num_workers=4,
                                         py_start_method='fork',
                                         parallel=True,
                                         device_id=None),
                       utils.create_pipe(callback,
                                         'cpu',
                                         batch_size,
                                         py_num_workers=4,
                                         py_start_method='fork',
                                         parallel=True,
                                         device_id=None))
                      for _ in range(pipeline_pairs)]
    for pipe0, pipe1 in parallel_pipes:
        pipe0.build()
        pipe1.build()
        utils.capture_processes(pipe0._py_pool)
        utils.capture_processes(pipe1._py_pool)
        utils.compare_pipelines(pipe0, pipe1, batch_size, iters)
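All eight pipelines above are CPU-only (device_id=None), which is what makes py_start_method='fork' safe here: no CUDA context exists in the parent process when the workers are forked. Example #6 shows the failure mode when that condition is violated.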
Example #3
def test_pytorch_cuda_context():
    # Create a dummy torch CUDA tensor so we acquire CUDA context
    cuda0 = torch.device('cuda:0')
    _ = torch.ones([1, 1], dtype=torch.float32, device=cuda0)
    callback = utils.ExtCallback((4, 5), 10, np.int32)
    pipe = utils.create_pipe(callback, 'cpu', 5, py_num_workers=6, py_start_method='fork',
                             parallel=True)
    pipe.start_py_workers()
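Here the order is deliberately reversed: a CUDA context is acquired through PyTorch before the fork-based workers are started. Presumably this probes how worker startup interacts with an already-initialized CUDA context; compare the RuntimeError asserted at the end of Example #6.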
Example #4
def test_exception_propagation():
    for raised, expected in [(StopIteration, StopIteration),
                             (utils.CustomException, Exception)]:
        callback = utils.ExtCallback((4, 4),
                                     250,
                                     np.int32,
                                     exception_class=raised)
        for num_workers in [1, 4]:
            for batch_size in [1, 15, 150]:
                yield _test_exception_propagation, callback, batch_size, num_workers, expected
Example #5
def test_parallel_no_workers():
    batch_size = 10
    iters = 4
    callback = utils.ExtCallback((4, 5), iters * batch_size, np.int32)
    parallel_pipe = utils.create_pipe(callback,
                                      'cpu',
                                      batch_size,
                                      py_num_workers=0,
                                      py_start_method='spawn',
                                      parallel=True,
                                      device_id=None)
    parallel_pipe.build()
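    # With py_num_workers=0, no Python worker pool should be created,
    # even though parallel=True was requested.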
    assert parallel_pipe._py_pool is None
    assert not parallel_pipe._py_pool_started
Example #6
def test_parallel_fork():
    epoch_size = 250
    callback = utils.ExtCallback((4, 5), epoch_size, np.int32)
    pipes = [(utils.create_pipe(callback,
                                'cpu',
                                batch_size,
                                py_num_workers=num_workers,
                                py_start_method='fork',
                                parallel=True),
              utils.create_pipe(callback, 'cpu', batch_size,
                                parallel=False), dtype, batch_size)
             for dtype in [np.float32, np.int16] for num_workers in [1, 3, 4]
             for batch_size in [1, 16, 150, 250]]
    pipes.append((utils.create_pipe(Iterable(32, (4, 5), dtype=np.int16),
                                    'cpu',
                                    32,
                                    py_num_workers=1,
                                    py_start_method='fork',
                                    parallel=True,
                                    batch=True),
                  utils.create_pipe(Iterable(32, (4, 5), dtype=np.int16),
                                    'cpu',
                                    32,
                                    parallel=False,
                                    batch=True), np.int16, 32))
    for parallel_pipe, _, _, _ in pipes:
        parallel_pipe.start_py_workers()
    for parallel_pipe, pipe, dtype, batch_size in pipes:
        yield utils.check_callback, parallel_pipe, pipe, epoch_size, batch_size, dtype
        # explicitly call py_pool close,
        # as nose might still reference parallel_pipe from the yield above
        parallel_pipe._py_pool.close()
    # test that another pipeline with forking initialization fails,
    # as a CUDA context has already been initialized
    parallel_pipe = utils.create_pipe(callback,
                                      'cpu',
                                      16,
                                      py_num_workers=4,
                                      py_start_method='fork',
                                      parallel=True)
    yield raises(
        RuntimeError,
        "Cannot fork a process when the CUDA has been initialized in the process."
    )(utils.build_and_run_pipeline), parallel_pipe, 1
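The raises helper used in the last yield is presumably the DALI test suite's own wrapper, which checks the error message as well as the exception type; the stock nose.tools.raises decorator accepts only exception classes.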
Example #7
def test_all_kinds_parallel():
    for batch_size in (1, 17):
        for num_iters in (1, 3, 31):
            for trailing in (0, 30):
                if trailing >= batch_size:
                    continue
                epoch_size = num_iters * batch_size + trailing
                sample_cb = utils.ExtCallback((4, 5), epoch_size, np.int32)
                batch_cb = SampleCallbackBatched(sample_cb,
                                                 batch_size,
                                                 batch_info=True)
                iterator_cb = SampleCallbackIterator(sample_cb,
                                                     batch_size,
                                                     batch_info=True)
                for reader_queue_sizes in ((1, 1, 1), (2, 2, 2), (5, 5, 5),
                                           (3, 1, 1), (1, 3, 1), (1, 1, 3)):
                    for num_workers in (1, 7):
                        yield _test_all_kinds_parallel, sample_cb, batch_cb, iterator_cb, \
                              batch_size, num_workers, reader_queue_sizes, num_iters
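Each configuration covers an epoch of num_iters * batch_size + trailing samples, and the trailing >= batch_size guard keeps the remainder a genuine partial batch (with batch_size=17, num_iters=3, trailing=0 this is exactly 51 samples).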
Example #8
def test_layout():
    for layout, dims in zip(["X", "XY", "XYZ"], ((4, ), (4, 4), (4, 4, 4))):
        callback = utils.ExtCallback(dims, 1024, 'int32')
        for num_workers in [1, 4]:
            for batch_size in [1, 256, 600]:
                yield _test_layout, callback, batch_size, layout, num_workers
Example #9
def test_stop_iteration_resume():
    callback = utils.ExtCallback((4, 4), 250, 'int32')
    layout = "XY"
    for num_workers in [1, 4]:
        for batch_size in [1, 15, 150]:
            yield _test_stop_iteration_resume, callback, batch_size, layout, num_workers