def test_deprecated_double_def():
    error_msg = ("Usage of `{}` is deprecated in favor of `output_{}`*only `output_{}` "
                 "should be provided.")
    shapes_error_msg = error_msg.format(*(("shapes",) * 3))
    yield raises(ValueError, shapes_error_msg)(dali_pipe_deprecated), \
        {"shapes": 2, "output_shapes": 2, "dtypes": tf.uint8}, 2, tf.uint8, dali_types.UINT8, 1, 2
    dtypes_error_msg = error_msg.format(*(("dtypes",) * 3))
    yield raises(ValueError, dtypes_error_msg)(dali_pipe_deprecated), \
        {"shapes": 2, "dtypes": tf.uint8, "output_dtypes": tf.uint8}, \
        2, tf.uint8, dali_types.UINT8, 1, 2
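
Every example on this page leans on a `raises(exception, glob)` decorator from DALI's test utilities, which turns a yielded test case into one that must fail with a matching error message. The sketch below is a hypothetical minimal reconstruction, assuming fnmatch-style glob matching (consistent with the `*` wildcards in the patterns above); the real helper may match substrings rather than the full message.

import fnmatch
import functools

def raises(exception, glob=None):
    # Hypothetical stand-in for the suite's helper: the wrapped call must raise
    # `exception`; if `glob` is given, str(error) must match it as a glob pattern.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            try:
                fn(*args, **kwargs)
            except exception as e:
                if glob is not None and not fnmatch.fnmatch(str(e), glob):
                    raise AssertionError(
                        "message {!r} does not match pattern {!r}".format(str(e), glob)) from e
                return
            raise AssertionError("{} was not raised".format(exception.__name__))
        return wrapper
    return decorator
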
def test_queue_large_failure():
    max_int32 = 2**31 - 1
    max_uint32 = 2**32 - 1
    error_message = "Failed to serialize object as C-like structure. " \
                    "Tried to populate following fields:"
    for start_method in ("spawn", "fork"):
        yield raises(RuntimeError, error_message)(_test_queue_large), \
            start_method, [(max_int32 + 1, 0, max_uint32, max_uint32, max_uint32)]
        yield raises(RuntimeError, error_message)(_test_queue_large), \
            start_method, [(max_int32, max_int32, -1, 0, 0)]
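
The boundary values are chosen to overflow fixed-width C fields: the exact layout of `ShmMessageDesc` is internal to DALI, but assuming int32/uint32 fields, the failure mode can be reproduced with `struct`:

import struct

struct.pack("i", 2**31 - 1)   # max_int32: fits in a signed 32-bit field
struct.pack("I", 2**32 - 1)   # max_uint32: fits in an unsigned 32-bit field
try:
    struct.pack("i", 2**31)   # max_int32 + 1 overflows int32
except struct.error as e:
    print(e)
try:
    struct.pack("I", -1)      # negative values do not fit in uint32
except struct.error as e:
    print(e)
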
def run_checks(samples_allowed, batches_allowed, samples_disallowed, batches_disallowed):
    for sample, baseline in samples_allowed:
        yield passes_assert, external_source_impl.assert_cpu_sample_data_type, sample
        yield converts, external_source_impl.sample_to_numpy, sample, baseline
    for sample, baseline in samples_allowed + batches_allowed:
        yield passes_assert, external_source_impl.assert_cpu_batch_data_type, sample
        yield converts, external_source_impl.batch_to_numpy, sample, baseline
    for sample in samples_disallowed:
        yield raises(TypeError, "Unsupported callback return type.")(
            external_source_impl.assert_cpu_sample_data_type), sample
    for sample in samples_disallowed + batches_disallowed:
        yield raises(TypeError, "Unsupported callback return type")(
            external_source_impl.assert_cpu_batch_data_type), sample
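
`passes_assert` and `converts` are defined elsewhere in the suite; plausible minimal implementations matching the yield signatures above (an assumption, not the actual code) would be:

import numpy as np

def passes_assert(assert_fn, sample):
    assert_fn(sample)  # the type check simply must not raise

def converts(convert_fn, sample, baseline):
    # conversion must reproduce the expected NumPy baseline
    np.testing.assert_array_equal(convert_fn(sample), baseline)
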
def test_batch_1_wrong_shape():
    for shape in [(2, None, None, None), (None, None, 4), (2, None, None, 4), (None, 0, None, 3)]:
        yield raises(
            tf.errors.InvalidArgumentError,
            "The shape provided for output `0` is not compatible with the "
            "shape returned by DALI Pipeline"
            )(dali_pipe_batch_1), shape, tf.uint8
Example 5
def test_parallel_fork():
    epoch_size = 250
    callback = ExtCallback((4, 5), epoch_size, np.int32)
    pipes = [(
        create_pipe(
            callback, 'cpu', batch_size, py_num_workers=num_workers, py_start_method='fork',
            parallel=True),
        create_pipe(callback, 'cpu', batch_size, parallel=False),
        dtype, batch_size)
        for dtype in [np.float32, np.int16]
        for num_workers in [1, 3, 4] for batch_size in [1, 16, 150, 250]]
    pipes.append((
        create_pipe(
            Iterable(32, (4, 5), dtype=np.int16), 'cpu', 32, py_num_workers=1,
            py_start_method='fork', parallel=True, batch=True),
        create_pipe(Iterable(32, (4, 5), dtype=np.int16), 'cpu', 32, parallel=False, batch=True),
        np.int16, 32))
    for parallel_pipe, _, _, _ in pipes:
        parallel_pipe.start_py_workers()
    for parallel_pipe, pipe, dtype, batch_size in pipes:
        yield check_callback, parallel_pipe, pipe, epoch_size, batch_size, dtype
        # explicitly close py_pool; nose might still reference parallel_pipe from the yield above
        parallel_pipe._py_pool.close()
    # test that another pipeline with fork-based initialization fails,
    # as a CUDA context has already been initialized in this process
    parallel_pipe = create_pipe(callback, 'cpu', 16, py_num_workers=4,
                                py_start_method='fork', parallel=True)
    yield raises(
        RuntimeError,
        "Cannot fork a process when the CUDA has been initialized in the process.")(
            build_and_run_pipeline), parallel_pipe, 1
Example 6
def test_batch_info_flag_default():
    batch_size = 5
    cb_int = BatchCb(False, batch_size, 1)
    yield _test_batch_info_flag_default, cb_int, batch_size
    cb_batch_info = BatchCb(True, batch_size, 1)
    yield raises(AssertionError, "Expected BatchInfo instance as cb argument")(
        _test_batch_info_flag_default), cb_batch_info, batch_size
def test_artificial_no_match():
    batch = 10
    for shape in [(batch + 1, None, None, None), (None, None, 3), (batch, 2, 1, 1)]:
        yield raises(
            tf.errors.InvalidArgumentError,
            "The shape provided for output `0` is not compatible with the shape returned by DALI Pipeline"
            )(dali_pipe_artificial_shape), shape, tf.uint8, dali_types.UINT8, batch
Example 8
def test_nan_check():
    err_msg = "Argument 'fdata' for operator 'constant' unexpectedly changed value from*"
    for values in [[np.nan, 1], [1, np.nan]]:
        yield raises(RuntimeError, glob=err_msg)(_test_nan_check), values

    for values in [[1, 1], [np.nan, np.nan]]:
        yield _test_nan_check, values
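
The split between failing and passing pairs comes down to NaN semantics: the pipeline rejects an argument whose value changes between iterations, yet `[np.nan, np.nan]` passes, so the change detection is evidently NaN-aware (a naive `!=` would flag it, since NaN compares unequal to itself):

import numpy as np

nan = float("nan")
print(nan != nan)                                    # True: IEEE 754 NaN != NaN
print(np.array_equal([nan], [nan], equal_nan=True))  # True: NaN-aware comparison
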
Example 9
def test_fail_sequence_rearrange():
    shape = [5, 1]
    orders = [([6, 7], False), ([-1], False), ([], False),
              ([np.int32([0]), np.int32([])], True),
              ([np.int32([6, 7]), np.int32([0])], True),
              ([np.int32([-1]), np.int32([0])], True),
              ([np.int32([[1], [2]]),
                np.int32([[1], [2]])], True)]
    error_msgs = [
        'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *',
        'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *',
        'Empty result sequences are not allowed',
        'Empty `new_order` for sample * is not allowed',
        'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *',
        'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *',
        'Input with dimension * cannot be converted to dimension *'
    ]

    assert len(orders) == len(error_msgs)

    for dev in ["cpu", "gpu"]:
        for [new_order, per_sample], error_msg in zip(orders, error_msgs):
            yield raises(RuntimeError, glob=error_msg)(check_fail_sequence_rearrange), \
                2, shape, new_order, per_sample, dev
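
The `[[]` and `[]]` sequences in the glob patterns above are bracket escapes: `[` normally opens a character class, so a literal bracket is written as a one-character class. For instance:

import fnmatch

msg = ("new_order[0] must be between 0 and input_sequence_length = 5 "
       "for sample 1, but it is: 6")
pattern = ("new_order[[]*[]] must be between * and input_sequence_length = * "
           "for sample *, but it is: *")
print(fnmatch.fnmatch(msg, pattern))  # True: "[[]" matches "[", "[]]" matches "]"
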
def test_non_uniform_batch():
    batches_disallowed = [
        [test_array, np.array([[42, 42]], dtype=np.uint8)],
        non_uniform_tl()
    ]
    for b in batches_disallowed:
        yield raises(ValueError, "Uniform input is required (batch of tensors of equal shapes)")(
            external_source_impl.batch_to_numpy), b
Example 11
def test_type_returns():
    for tf_t, dali_t in matching_types:
        yield dali_pipe_types, tf_t, dali_t
    for tf_t, dali_t in not_matching_types:
        yield raises(
            tf.errors.InvalidArgumentError,
            "The type provided for output `0` is not compatible with the type returned by DALI Pipeline"
            )(dali_pipe_types), tf_t, dali_t
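
`matching_types` and `not_matching_types` are defined elsewhere; representative pairs (an assumption about their contents, not the suite's actual lists) might look like:

# hypothetical examples of the (tf_t, dali_t) pairs iterated above
matching_types = [(tf.uint8, dali_types.UINT8), (tf.float32, dali_types.FLOAT)]
not_matching_types = [(tf.int32, dali_types.UINT8), (tf.uint8, dali_types.FLOAT)]
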
def test_slice_with_out_of_bounds_error():
    in_shape = (40, 80, 3)
    for device in ['gpu', 'cpu']:
        for batch_size in [1, 3]:
            yield raises(
                RuntimeError,
                "Slice can't be placed out of bounds with current policy."
            )(check_cmn_with_out_of_bounds_error), device, batch_size, in_shape
Example 13
def test_cycle_no_resetting():
    batch_size = 20
    for epoch_size, cb in [
        (1, Iterable(batch_size, (4, 5), epoch_size=1)),
        (4, Iterable(batch_size, (4, 5), epoch_size=4)),
        (1, generator_epoch_size_1),
        (4, generator_epoch_size_4)]:
        for reader_queue_size in (1, 2, 6):
            yield raises(StopIteration)(_test_cycle_no_resetting), \
                cb, batch_size, epoch_size, reader_queue_size
Example 14
def test_queue_full_assertion():
    for start_method in ("spawn", "fork"):
        for capacity in [1, 4]:
            for one_by_one in (True, False):
                mp = multiprocessing.get_context(start_method)
                queue = ShmQueue(mp, capacity)
                msgs = [
                    ShmMessageDesc(i, i, i, i, i) for i in range(capacity + 1)
                ]
                yield raises(
                    RuntimeError,
                    "The queue is full")(_put_msgs), queue, msgs, one_by_one
Example 15
def test_wrong_layouts_sequence_rearrange():
    shape = [5, 1]
    new_order = [0, 2, 1, 3, 4]
    per_sample = False
    for dev in ["cpu", "gpu"]:
        for layout in ["HF", "HW"]:
            yield raises(
                RuntimeError,
                glob='Expected sequence as the input, where outermost dimension '
                     'represents frames dimension `F`, got data with layout = "H[WF]"')(
                    check_fail_sequence_rearrange), 5, shape, new_order, per_sample, dev, layout
Example 16
def test_wrong_source():
    callable_msg = ("Callable passed to External Source in parallel mode (when `parallel=True`) "
                    "must accept exactly one argument*. Got {} instead.")
    batch_required_msg = "Parallel external source with {} must be run in a batch mode"
    disallowed_sources = [
        (no_arg_fun, (TypeError, callable_msg.format("a callable that does not accept arguments"))),
        (multi_arg_fun, (TypeError, "External source callback must be a callable with 0 or 1 argument")),
        (Iterable(), (TypeError, batch_required_msg.format("an iterable"))),
        (generator_fun, (TypeError, batch_required_msg.format("a generator function"))),
        (generator_fun(), (TypeError, batch_required_msg.format("an iterable"))),
    ]
    for source, (error_type, error_msg) in disallowed_sources:
        yield raises(error_type, error_msg)(check_source_build), source
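
The disallowed sources are defined elsewhere; minimal shapes that would exercise each branch (hypothetical reconstructions, not the suite's definitions) could be:

import numpy as np

def no_arg_fun():          # a callable that does not accept arguments
    return np.zeros((4, 5), dtype=np.uint8)

def multi_arg_fun(a, b):   # a callable with more than one argument
    return np.zeros((4, 5), dtype=np.uint8)

def generator_fun():       # a generator function; generator_fun() is an iterable
    while True:
        yield [np.zeros((4, 5), dtype=np.uint8)]
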
Example 17
def test_squeeze_throw_error():
    args_list = [
        ([1], None, None, [(300, 1, 200), (10, 10, 10)]),
        (None, "C", "XYZ", [(2, 3, 4), (4, 2, 3)]),
        (None, "Z", "XYZ", [(1, 1, 10)]),
        ([2], "Z", "XYZ", [[1, 1, 10]]),
        ([2, 1], None, "XYZ", [(100, 0, 0)]),
        ([1, 1], None, "XYZ", [(300, 1, 200), (10, 1, 10)]),
    ]
    expected_errors = [
        "Requested a shape with 100 elements but the original shape has 1000 elements.",
        "Axis 'C' is not present in the input layout",
        "Requested a shape with 1 elements but the original shape has 10 elements.",
        "Provided both ``axes`` and ``axis_names`` arguments",
        "Requested a shape with 100 elements but the original shape has 0 elements.",
        "Specified at least twice same dimension to remove."
    ]
    assert len(expected_errors) == len(args_list)
    for (axes, axis_names, layout,
         shapes), error_msg in zip(args_list, expected_errors):
        yield raises(RuntimeError, error_msg)(
            _test_squeeze_throw_error), axes, axis_names, layout, shapes
def test_no_output_dtypes():
    expected_msg = ("`output_dtypes` should be provided as single tf.DType value or a tuple of "
                    "tf.DType values")
    yield raises(TypeError, expected_msg)(dali_pipe_deprecated), \
        {"shapes": 2}, 2, tf.uint8, dali_types.UINT8, 1, 2
Example 19
def _test_exception_propagation(callback, batch_size, num_workers, expected):
    pipe = create_pipe(
        callback, 'cpu', batch_size, py_num_workers=num_workers,
        py_start_method='spawn', parallel=True)
    raises(expected)(build_and_run_pipeline)(pipe, None)
Example 20
def test_multiple_input_invalid():
    for batch in [1, 10]:
        for shapes in [(None,), (batch, 200, 200, 3, None), (None, None, None)]:
            yield raises(ValueError, "The two structures don't have the same sequence length.")(
                dali_pipe_multiple_out), shapes, (tf.uint8, tf.uint8), batch
Example 22
def test_zero_dim_not_allowed():
    expected_msg = "Cannot mark zero-dimensional input as a sequence"
    for device in ["cpu", "gpu"]:
        yield raises(RuntimeError, expected_msg)(run_pipeline), device, 0