Example 1
def test_parallel_fork_cpu_only():
    """Build several pairs of fork-started, CPU-only parallel pipelines and
    check that the two pipelines in each pair produce identical outputs."""
    num_pairs = 4
    batch_size = 10
    iters = 40
    callback = utils.ExtCallback((4, 5), iters * batch_size, np.int32)

    def make_pipe():
        # device_id=None keeps the pipeline CPU-only, which is what makes
        # the 'fork' start method usable here (no CUDA context in the parent).
        return utils.create_pipe(callback,
                                 'cpu',
                                 batch_size,
                                 py_num_workers=4,
                                 py_start_method='fork',
                                 parallel=True,
                                 device_id=None)

    pipe_pairs = [(make_pipe(), make_pipe()) for _ in range(num_pairs)]
    for first, second in pipe_pairs:
        first.build()
        second.build()
        utils.capture_processes(first._py_pool)
        utils.capture_processes(second._py_pool)
        utils.compare_pipelines(first, second, batch_size, iters)
Example 2
def _test_permute_dataset(batch_size, epoch_size, trailing_samples, cb,
                          py_num_workers, prefetch_queue_depth,
                          reader_queue_depth):
    """Run ``num_epochs`` epochs and check that each epoch yields every
    sample index from the first ``epoch_size * batch_size`` positions
    exactly once, followed by a StopIteration at the epoch boundary."""
    num_epochs = 3
    pipe = utils.create_pipe(cb,
                             "cpu",
                             batch_size=batch_size,
                             py_num_workers=py_num_workers,
                             py_start_method="spawn",
                             parallel=True,
                             device_id=0,
                             batch=False,
                             num_threads=1,
                             cycle=None,
                             prefetch_queue_depth=prefetch_queue_depth,
                             reader_queue_depth=reader_queue_depth)
    pipe.build()
    utils.capture_processes(pipe._py_pool)
    samples_per_epoch = epoch_size * batch_size
    for epoch_idx in range(num_epochs):
        # One flag per possible sample index; the first element of each
        # sample encodes its index within the (permuted) data set.
        seen = [False] * (samples_per_epoch + trailing_samples)
        for _ in range(epoch_size):
            (batch, ) = pipe.run()
            assert len(batch) == batch_size
            for sample in batch:
                seen[np.array(sample)[0]] = True
        assert sum(seen) == samples_per_epoch, \
            "Epoch number {} did not contain some samples from data set".format(epoch_idx)
        # The epoch must end with StopIteration, after which reset() allows
        # the next epoch to run.
        try:
            pipe.run()
        except StopIteration:
            pipe.reset()
        else:
            assert False, "expected StopIteration"
Example 3
def _test_epoch_idx(batch_size, epoch_size, cb, py_num_workers,
                    prefetch_queue_depth, reader_queue_depth, batch_mode,
                    batch_info):
    """Check that each produced sample carries the expected
    (index-in-epoch, index-in-batch, iteration, epoch) quadruple and that
    every epoch ends with StopIteration."""
    num_epochs = 3
    pipe = utils.create_pipe(cb,
                             "cpu",
                             batch_size=batch_size,
                             py_num_workers=py_num_workers,
                             py_start_method="spawn",
                             parallel=True,
                             device_id=0,
                             batch=batch_mode,
                             num_threads=1,
                             cycle=None,
                             batch_info=batch_info,
                             prefetch_queue_depth=prefetch_queue_depth,
                             reader_queue_depth=reader_queue_depth)
    pipe.build()
    utils.capture_processes(pipe._py_pool)
    for epoch_idx in range(num_epochs):
        for iteration in range(epoch_size):
            (batch, ) = pipe.run()
            assert len(batch) == batch_size
            for pos_in_batch, sample in enumerate(batch):
                # In batch mode without batch_info the callback does not see
                # the epoch number, so the expected epoch field is 0.
                visible_epoch = epoch_idx if not batch_mode or batch_info else 0
                expected = np.array([
                    iteration * batch_size + pos_in_batch,
                    pos_in_batch,
                    iteration,
                    visible_epoch,
                ])
                np.testing.assert_array_equal(sample, expected)
        try:
            pipe.run()
        except StopIteration:
            pipe.reset()
        else:
            assert False, "expected StopIteration"
Example 4
def _test_cycle_quiet_non_resetable(iterable, reader_queue_size, batch_size,
                                    epoch_size):
    """With cycle="quiet" and a source that cannot be restarted, the
    pipeline must raise StopIteration at the end of the epoch and keep
    raising it even after reset()."""
    pipe = utils.create_pipe(iterable,
                             "cpu",
                             batch_size=batch_size,
                             py_num_workers=1,
                             py_start_method="spawn",
                             parallel=True,
                             device_id=None,
                             batch=True,
                             num_threads=5,
                             cycle="quiet",
                             reader_queue_depth=reader_queue_size)
    pipe.build()
    utils.capture_processes(pipe._py_pool)
    for _ in range(epoch_size):
        pipe.run()

    def run_raises_stop_iteration():
        # Returns True iff pipe.run() raised StopIteration.
        try:
            pipe.run()
        except StopIteration:
            return True
        return False

    assert run_raises_stop_iteration(), \
        "Expected stop iteration at the end of the epoch"
    pipe.reset()
    assert run_raises_stop_iteration(), "Expected stop iteration"
Example 5
def _test_cycle_quiet(cb, is_gen_fun, batch_size, epoch_size,
                      reader_queue_size):
    """With cycle="quiet" the pipeline should silently wrap around at epoch
    boundaries; compare its output against a reference iterator restarted
    at every epoch."""
    pipe = utils.create_pipe(cb,
                             "cpu",
                             batch_size=batch_size,
                             py_num_workers=1,
                             py_start_method="spawn",
                             parallel=True,
                             device_id=None,
                             batch=True,
                             num_threads=5,
                             cycle="quiet",
                             reader_queue_depth=reader_queue_size)
    pipe.build()
    utils.capture_processes(pipe._py_pool)
    reference = cb
    for step in range(3 * epoch_size + 1):
        if step % epoch_size == 0:
            # Epoch boundary: restart the reference source the same way the
            # pipeline does (call a generator function, or re-iterate).
            reference = cb() if is_gen_fun else iter(cb)
        (batch, ) = pipe.run()
        expected_batch = next(reference)
        assert len(batch) == len(expected_batch), \
            f"Batch length mismatch: expected {len(expected_batch)}, got {len(batch)}"
        for got, want in zip(batch, expected_batch):
            np.testing.assert_equal(got, want)
Example 6
def test_pytorch_cuda_context():
    """Acquire a CUDA context through torch first, then start fork-based
    workers for a CPU pipeline."""
    # Allocating a tiny CUDA tensor forces torch to initialize a CUDA context.
    device = torch.device('cuda:0')
    _ = torch.ones([1, 1], dtype=torch.float32, device=device)
    callback = utils.ExtCallback((4, 5), 10, np.int32)
    pipe = utils.create_pipe(callback,
                             'cpu',
                             5,
                             py_num_workers=6,
                             py_start_method='fork',
                             parallel=True)
    pipe.start_py_workers()
Example 7
def _test_exception_propagation(callback, batch_size, num_workers, expected):
    """Build and run a pipeline whose callback is expected to raise; the
    ``expected`` exception type must propagate out of the worker processes."""
    pipe = utils.create_pipe(callback,
                             'cpu',
                             batch_size,
                             py_num_workers=num_workers,
                             py_start_method='spawn',
                             parallel=True)
    # Wrap the runner so that the expected exception is asserted.
    checked_run = raises(expected)(utils.build_and_run_pipeline)
    checked_run(pipe, None)
Example 8
def check_source_build(source):
    """Check that a parallel pipeline built from ``source`` constructs and
    builds without raising."""
    utils.create_pipe(source,
                      'cpu',
                      10,
                      py_num_workers=4,
                      py_start_method='spawn',
                      parallel=True).build()
Example 9
def test_mxnet_cuda():
    """Run a pipeline whose spawned-worker callback returns MXNet CUDA data."""
    cuda_callback = ExtCallbackMXCuda((4, 5), 10, np.int32)
    pipe = create_pipe(cuda_callback,
                       'cpu',
                       5,
                       py_num_workers=6,
                       py_start_method='spawn',
                       parallel=True)
    build_and_run_pipeline(pipe)
Example 10
def _test_layout(callback, batch_size, layout, num_workers):
    """Check that a parallel pipeline propagates the requested sample
    ``layout`` to its output."""
    pipe = utils.create_pipe(callback,
                             'cpu',
                             batch_size,
                             layout=layout,
                             py_num_workers=num_workers,
                             py_start_method='spawn',
                             parallel=True)
    utils.check_layout(pipe, layout)
Example 11
def test_parallel_fork():
    """Nose-style generator test for fork-started parallel pipelines.

    Builds (parallel, serial) pipeline pairs across dtypes, worker counts
    and batch sizes, forks all worker processes up front, yields one
    comparison per pair, and finally checks that creating yet another
    fork-based pipeline fails once a CUDA context exists in the process.
    """
    epoch_size = 250
    callback = utils.ExtCallback((4, 5), epoch_size, np.int32)
    pipes = [(utils.create_pipe(callback,
                                'cpu',
                                batch_size,
                                py_num_workers=num_workers,
                                py_start_method='fork',
                                parallel=True),
              utils.create_pipe(callback, 'cpu', batch_size,
                                parallel=False), dtype, batch_size)
             for dtype in [np.float32, np.int16] for num_workers in [1, 3, 4]
             for batch_size in [1, 16, 150, 250]]
    pipes.append((utils.create_pipe(Iterable(32, (4, 5), dtype=np.int16),
                                    'cpu',
                                    32,
                                    py_num_workers=1,
                                    py_start_method='fork',
                                    parallel=True,
                                    batch=True),
                  utils.create_pipe(Iterable(32, (4, 5), dtype=np.int16),
                                    'cpu',
                                    32,
                                    parallel=False,
                                    batch=True), np.int16, 32))
    # Fork every worker pool before any pipeline runs, while the parent
    # process presumably still has no CUDA context — TODO confirm ordering
    # requirement against create_pipe/start_py_workers docs.
    for parallel_pipe, _, _, _ in pipes:
        parallel_pipe.start_py_workers()
    for parallel_pipe, pipe, dtype, batch_size in pipes:
        yield utils.check_callback, parallel_pipe, pipe, epoch_size, batch_size, dtype
        # explicitly call py_pool close
        # as nose might still reference parallel_pipe from the yield above
        parallel_pipe._py_pool.close()
    # test that another pipeline with forking initialization fails
    # as there are CUDA contexts already initialized
    parallel_pipe = utils.create_pipe(callback,
                                      'cpu',
                                      16,
                                      py_num_workers=4,
                                      py_start_method='fork',
                                      parallel=True)
    yield raises(
        RuntimeError,
        "Cannot fork a process when the CUDA has been initialized in the process."
    )(utils.build_and_run_pipeline), parallel_pipe, 1
Example 12
def test_parallel_no_workers():
    """With ``py_num_workers=0`` no Python worker pool should be created."""
    batch_size = 10
    iters = 4
    callback = utils.ExtCallback((4, 5), iters * batch_size, np.int32)
    pipe = utils.create_pipe(callback,
                             'cpu',
                             batch_size,
                             py_num_workers=0,
                             py_start_method='spawn',
                             parallel=True,
                             device_id=None)
    pipe.build()
    # Zero workers means the pool is never allocated nor started.
    assert pipe._py_pool is None
    assert not pipe._py_pool_started
Example 13
def _test_cycle_raise(cb, is_gen_fun, batch_size, epoch_size,
                      reader_queue_size):
    """With cycle="raise" the pipeline should deliver exactly
    ``epoch_size`` batches matching the reference source, then raise
    StopIteration, and resume after reset() — checked over three epochs."""
    pipe = utils.create_pipe(cb,
                             "cpu",
                             batch_size=batch_size,
                             py_num_workers=1,
                             py_start_method="spawn",
                             parallel=True,
                             device_id=None,
                             batch=True,
                             num_threads=5,
                             cycle="raise",
                             reader_queue_depth=reader_queue_size)
    pipe.build()
    utils.capture_processes(pipe._py_pool)
    # First epoch: a generator function is called; a plain iterable is
    # consumed directly (not via iter()) — later epochs re-iterate it.
    refer_iter = cb() if is_gen_fun else cb
    for _ in range(3):
        i = 0
        while True:
            try:
                # StopIteration may come from pipe.run() or from the
                # reference source; both end the epoch.
                (batch, ) = pipe.run()
                expected_batch = next(refer_iter)
                assert len(batch) == len(expected_batch), \
                    f"Batch length mismatch: expected {len(expected_batch)}, got {len(batch)}"
                for got, want in zip(batch, expected_batch):
                    np.testing.assert_equal(got, want)
                i += 1
            except StopIteration:
                pipe.reset()
                refer_iter = cb() if is_gen_fun else iter(cb)
                assert i == epoch_size, \
                    f"Number of iterations mismatch: expected {epoch_size}, got {i}"
                break
Example 14
def test_pytorch_cuda():
    """Run a pipeline whose spawned-worker callback returns torch CUDA data."""
    cuda_callback = ExtCallbackTorchCuda((4, 5), 10, np.int32)
    pipe = utils.create_pipe(cuda_callback,
                             'cpu',
                             5,
                             py_num_workers=6,
                             py_start_method='spawn',
                             parallel=True)
    utils.build_and_run_pipeline(pipe)