Example #1
def check_flip_vs_numpy(device, batch_size, vertical, horizontal):
    if vertical and horizontal:
        python_func = flip_vertical_horizontal
    else:
        python_func = flip_vertical if vertical else flip_horizontal
    compare_pipelines(FlipPipeline(device, batch_size, is_vertical=vertical, is_horizontal=horizontal),
                      FlipPythonOpPipeline(batch_size, python_func),
                      batch_size=batch_size, N_iterations=10)
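The check_* helpers above are normally driven from a parametrized test. A minimal, hypothetical driver (the parameter values and the nose-style yield pattern are assumptions, not part of the original example) could look like this:

# Hypothetical driver; parameter values are illustrative only.
def test_flip_vs_numpy():
    for device in ['cpu', 'gpu']:
        for batch_size in [1, 8]:
            for vertical in [False, True]:
                for horizontal in [False, True]:
                    yield check_flip_vs_numpy, device, batch_size, vertical, horizontal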
Example #2
def check_cast_operator_float16(device, batch_size, in_type, out_type):
    input_shape = (300, 400, 3)
    eii1 = RandomlyShapedDataIterator(batch_size, max_shape=input_shape, dtype=in_type)
    eii2 = RandomlyShapedDataIterator(batch_size, max_shape=input_shape, dtype=in_type)
    compare_pipelines(
        CastPipeline(device, batch_size, iter(eii1), [types.FLOAT16, out_type]),
        CastPipeline(device, batch_size, iter(eii2), [out_type]),
        batch_size=batch_size, N_iterations=5)
Example #3
def check_image_decoder_slice_alias(new_op, old_op, file_root, device,
                                    use_fast_idct):
    new_pipe = decoder_slice_pipe(new_op, file_root, device, use_fast_idct)
    legacy_pipe = decoder_slice_pipe(old_op, file_root, device, use_fast_idct)
    compare_pipelines(new_pipe,
                      legacy_pipe,
                      batch_size=batch_size_test,
                      N_iterations=3)
Example #4
def check_FastDCT_body(batch_size, img_type, device):
    data_path = os.path.join(test_data_root, good_path, img_type)
    compare_pipelines(DecoderPipeline(data_path=data_path, batch_size=batch_size, num_threads=3,
                                      device_id=0, device=device, use_fast_idct=False),
                      DecoderPipeline(data_path=data_path, batch_size=batch_size, num_threads=3,
                                      device_id=0, device='cpu', use_fast_idct=True),
                      # average difference should be no bigger than off-by-3
                      batch_size=batch_size, N_iterations=3, eps=3)
Example #5
def test_color_twist_vs_cpu():
    batch_size = 8
    seed = 1919
    rand_it1 = RandomDataIterator(batch_size, shape=(1024, 512, 3))
    rand_it2 = RandomDataIterator(batch_size, shape=(1024, 512, 3))
    compare_pipelines(ColorTwistPipeline(batch_size, seed, iter(rand_it1), kind="new"),
                      ColorTwistPipeline(batch_size, seed, iter(rand_it2), kind="oldCpu"),
                      batch_size=batch_size, N_iterations=16, eps=1)
Example #6
def check_natural_logarithm(device, batch_size, input_shape):
    eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    compare_pipelines(NLDaliPipeline(device, iter(eii1), batch_size),
                      NLPythonPipeline(iter(eii2), batch_size),
                      batch_size=batch_size,
                      N_iterations=5,
                      eps=1e-04)
Example #7
def test_slice_extract_channel_gpu():
    for batch_size in {1, 32, 64}:
        eii = SliceArgsIteratorExtractFirstChannel(batch_size)
        compare_pipelines(SlicePipeline('gpu', batch_size, iter(eii)),
                          PythonOperatorPipeline(extract_first_channel,
                                                 batch_size),
                          batch_size=batch_size,
                          N_iterations=10)
Example #8
def check_flip(batch_size, layout, shape, device):
    eiis = [RandomDataIterator(batch_size, shape=shape) for k in range(2)]
    compare_pipelines(SynthFlipPipeline(batch_size, layout, iter(eiis[0]),
                                        device),
                      SynthPythonFlipPipeline(batch_size, layout,
                                              iter(eiis[1])),
                      batch_size=batch_size,
                      N_iterations=5)
Example #9
def _test_injection(device, name, transform, eps=1e-07):
    print(f'\nTesting {name}')
    pipe_load = load_images_pipeline()
    pipe_load.build()
    pipe_standard = injection_pipeline_standard(device)
    pipe_debug = injection_pipeline(lambda: transform(pipe_load.run()[0]),
                                    device)
    compare_pipelines(pipe_standard, pipe_debug, 8, 10, eps=eps)
Example #10
def test_init_config_pipeline():
    pipe_standard = init_config_pipeline(batch_size=8,
                                         num_threads=3,
                                         device_id=0)
    pipe_debug = init_config_pipeline(batch_size=8,
                                      num_threads=3,
                                      device_id=0,
                                      debug=True)
    compare_pipelines(pipe_standard, pipe_debug, 8, 10)
Example #11
def compare(pipe1, pipe2, max_err):
    epoch_size = pipe1.epoch_size("Reader")
    batch_size = pipe1.max_batch_size
    niter = (epoch_size + batch_size - 1) // batch_size
    compare_pipelines(pipe1,
                      pipe2,
                      batch_size,
                      niter,
                      max_allowed_error=max_err)
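The ceiling division above runs enough iterations to cover a full epoch, including a trailing partial batch. A small worked example with illustrative numbers (not taken from the snippet itself):

epoch_size = 100   # illustrative reader size
batch_size = 32    # illustrative max batch size
niter = (epoch_size + batch_size - 1) // batch_size   # = 4, so the final partial batch of 4 samples is included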
Example #12
def check_transpose_vs_numpy(device, batch_size, dim, total_volume, permutation):
    max_shape = [int(math.pow(total_volume/batch_size, 1/dim))] * dim
    print("Testing", device, "backend with batch of", batch_size, "max size", max_shape)
    print("permutation ", permutation)
    eii1 = RandomlyShapedDataIterator(batch_size, max_shape=max_shape)
    eii2 = RandomlyShapedDataIterator(batch_size, max_shape=max_shape)
    compare_pipelines(TransposePipeline(device, batch_size, "", iter(eii1), permutation=permutation),
                      PythonOpPipeline(lambda x: transpose_func(x, permutation), batch_size, "", iter(eii2)),
                      batch_size=batch_size, N_iterations=5)
Example #13
def check_crop_NHWC_vs_python_op_crop(device, batch_size):
    eii1 = RandomDataIterator(batch_size, shape=(600, 800, 3))
    eii2 = RandomDataIterator(batch_size, shape=(600, 800, 3))
    compare_pipelines(CropSequencePipeline(device, batch_size, types.NHWC,
                                           iter(eii1)),
                      CropSequencePythonOpPipeline(crop_NHWC_func, batch_size,
                                                   types.NHWC, iter(eii2)),
                      batch_size=batch_size,
                      N_iterations=10)
Example #14
def check_cmn_cpu_vs_gpu(batch_size, output_dtype, output_layout, mirror_probability,
                         mean, std, pad_output):
    iterations = 8 if batch_size == 1 else 1
    compare_pipelines(CropMirrorNormalizePipeline('cpu', batch_size, output_dtype=output_dtype,
                                                  output_layout=output_layout, mirror_probability=mirror_probability,
                                                  mean=mean, std=std, pad_output=pad_output),
                      CropMirrorNormalizePipeline('gpu', batch_size, output_dtype=output_dtype,
                                                  output_layout=output_layout, mirror_probability=mirror_probability,
                                                  mean=mean, std=std, pad_output=pad_output),
                      batch_size=batch_size, N_iterations=iterations)
Example #15
def test_slice_args_WH_vs_args_HWC():
    for device in {'cpu', 'gpu'}:
        for batch_size in {3, 32, 64}:
            eii1 = SliceArgsIterator(batch_size)
            eii2 = SliceArgsIteratorAllDims(batch_size)

            compare_pipelines(SlicePipeline(device, batch_size, iter(eii1), is_fused_decoder=False),
                              SlicePipeline(device, batch_size, iter(eii2), is_fused_decoder=False),
                              batch_size=batch_size, N_iterations=10)
Example #16
def test_slice_vs_fused_decoder():
    for device in {'cpu', 'gpu'}:
        for batch_size in {1, 13, 64}:
            eii1 = SliceArgsIterator(batch_size)
            eii2 = SliceArgsIterator(batch_size)

            compare_pipelines(SlicePipeline(device, batch_size, iter(eii1), is_fused_decoder=True),
                              SlicePipeline(device, batch_size, iter(eii2), is_fused_decoder=False),
                              batch_size=batch_size, N_iterations=10)
Example #17
def test_crop_sequence_old_crop_vs_new_crop_gpu():
    batch_size = 4
    compare_pipelines(CropSequencePipeline('gpu', batch_size,
                                           is_old_crop=True),
                      CropSequencePipeline('gpu',
                                           batch_size,
                                           is_old_crop=False),
                      batch_size=batch_size,
                      N_iterations=10)
Example #18
def check_crop_no_cast_vs_cast_to_float_and_back(device, batch_size):
    compare_pipelines(CropCastPipeline(device,
                                       batch_size,
                                       should_perform_cast=False),
                      CropCastPipeline(device,
                                       batch_size,
                                       should_perform_cast=True),
                      batch_size=batch_size,
                      N_iterations=10)
Example #19
def check_nonsilence_operator(batch_size, cutoff_value, window_size,
                              reference_power, reset_interval, eps):
    test_utils.compare_pipelines(
        NonsilenceDaliPipeline(batch_size, cutoff_value, window_size,
                               reference_power, reset_interval),
        NonsilenceRosaPipeline(batch_size, -cutoff_value, window_size,
                               reference_power, reset_interval),
        batch_size=batch_size,
        N_iterations=3,
        eps=eps)
Example #20
def check_operator_spectrogram_vs_python(device, batch_size, input_shape,
                                         nfft, window_length, window_step):
    eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    compare_pipelines(
        SpectrogramPipeline(device, batch_size, iter(eii1), nfft=nfft, window=None,
                            window_length=window_length, window_step=window_step),
        SpectrogramPythonPipeline(device, batch_size, iter(eii2), window=None, nfft=nfft,
                                  window_length=window_length, window_step=window_step),
        batch_size=batch_size, N_iterations=5, eps=1e-04)
Example #21
def check_deserialization(batch_size, num_threads, shape):
    ref_pipe = TestPipeline(batch_size=batch_size,
                            num_threads=num_threads,
                            shape=shape)
    serialized = ref_pipe.serialize()
    test_pipe = Pipeline.deserialize(serialized)
    test_utils.compare_pipelines(ref_pipe,
                                 test_pipe,
                                 batch_size=batch_size,
                                 N_iterations=3)
Example #22
def test_mxnet_reader_alias():
    recordio = [
        os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.rec')
    ]
    recordio_idx = [
        os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.idx')
    ]
    new_pipe = mxnet_pipe(fn.readers.mxnet, recordio, recordio_idx)
    legacy_pipe = mxnet_pipe(fn.mxnet_reader, recordio, recordio_idx)
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
Example #23
def check_cmn_cpu_old_vs_new(device_new, device_old, batch_size, output_dtype, output_layout,
                             mirror_probability, mean, std, pad_output):
    compare_pipelines(CropMirrorNormalizePipeline(device_old, batch_size, output_dtype=output_dtype,
                                                  output_layout=output_layout, mirror_probability=mirror_probability,
                                                  mean=mean, std=std, pad_output=pad_output,
                                                  is_new_cmn=False),
                      CropMirrorNormalizePipeline(device_new, batch_size, output_dtype=output_dtype,
                                                  output_layout=output_layout, mirror_probability=mirror_probability,
                                                  mean=mean, std=std, pad_output=pad_output,
                                                  is_new_cmn=True),
                      batch_size=batch_size, N_iterations=10)
Example #24
def check_operator_mfcc_vs_python(device, batch_size, input_shape,
                                  axis, dct_type, lifter, n_mfcc, norm):
    eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    compare_pipelines(
        MFCCPipeline(device, batch_size, iter(eii1),
                     axis=axis, dct_type=dct_type, lifter=lifter, n_mfcc=n_mfcc, norm=norm),
        MFCCPythonPipeline(device, batch_size, iter(eii2),
                           axis=axis, dct_type=dct_type, lifter=lifter, n_mfcc=n_mfcc, norm=norm),
        batch_size=batch_size, N_iterations=5, eps=1e-03)
Example #25
def check_numpy_reader_alias(test_data_root, device):
    new_pipe = numpy_reader_pipe(fn.readers.numpy,
                                 path=test_data_root,
                                 device=device,
                                 path_filter="test_*.npy")
    legacy_pipe = numpy_reader_pipe(fn.numpy_reader,
                                    path=test_data_root,
                                    device=device,
                                    path_filter="test_*.npy")
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
Example #26
def test_slice_vs_numpy_slice_cpu():
    for batch_size in {1, 32, 64}:
        eii = SliceArgsIteratorAllDims(batch_size)
        compare_pipelines(SlicePipeline('cpu',
                                        batch_size,
                                        iter(eii),
                                        is_old_slice=False),
                          PythonOperatorPipeline(slice_func, batch_size),
                          batch_size=batch_size,
                          N_iterations=10)
Example #27
def check_operator_erase_vs_python(device, batch_size, input_shape,
                                   anchor, shape, axis_names, axes, input_layout, fill_value):
    eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    compare_pipelines(
        ErasePipeline(device, batch_size, input_layout, iter(eii1), anchor=anchor,
                      shape=shape, axis_names=axis_names, axes=axes, fill_value=fill_value),
        ErasePythonPipeline(device, batch_size, input_layout, iter(eii2), anchor=anchor,
                            shape=shape, axis_names=axis_names, axes=axes, fill_value=fill_value),
        batch_size=batch_size, N_iterations=5, eps=1e-04, expected_layout=input_layout)
Example #28
def check_operator_to_decibels_vs_python(device, batch_size, input_shape,
                                         multiplier, reference, cutoff_db):
    eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    compare_pipelines(
        ToDecibelsPipeline(device, batch_size, iter(eii1),
                           multiplier=multiplier, reference=reference, cutoff_db=cutoff_db),
        ToDecibelsPythonPipeline(device, batch_size, iter(eii2),
                                 multiplier=multiplier, reference=reference, cutoff_db=cutoff_db),
        batch_size=batch_size, N_iterations=5, eps=1e-04)
Example #29
def test_file_reader_alias():
    fnames = g_files

    file_list = os.path.join(g_root, "list.txt")
    with open(file_list, "w") as f:
        for i, name in enumerate(fnames):
            f.write("{0} {1}\n".format(name, 10000 - i))
    new_pipe = file_pipe(fn.readers.file, file_list)
    legacy_pipe = file_pipe(fn.file_reader, file_list)
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
Example #30
def test_pipeline_runtime(flip_vertical, flip_horizontal):
    put_combined = pipeline_runtime(flip_vertical,
                                    flip_horizontal,
                                    batch_size=max_batch_size,
                                    num_threads=num_threads,
                                    device_id=device_id)
    ref = reference_pipeline(flip_vertical, flip_horizontal)
    compare_pipelines(put_combined,
                      ref,
                      batch_size=max_batch_size,
                      N_iterations=N_ITER)
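Every example above funnels into compare_pipelines from DALI's test utilities. As a rough mental model only, here is a simplified sketch (an assumption, not the actual implementation; options such as max_allowed_error and expected_layout are omitted): build both pipelines, run them in lockstep for N_iterations, and assert that corresponding samples match within eps.

import numpy as np

def compare_pipelines_sketch(pipe1, pipe2, batch_size, N_iterations, eps=1e-07):
    # Simplified sketch: build both pipelines and compare their outputs sample by sample.
    pipe1.build()
    pipe2.build()
    for _ in range(N_iterations):
        out1, out2 = pipe1.run(), pipe2.run()
        for o1, o2 in zip(out1, out2):
            # Copy GPU outputs to host before comparison.
            o1 = o1.as_cpu() if hasattr(o1, 'as_cpu') else o1
            o2 = o2.as_cpu() if hasattr(o2, 'as_cpu') else o2
            for i in range(batch_size):
                a, b = np.array(o1.at(i)), np.array(o2.at(i))
                assert a.shape == b.shape
                assert np.allclose(a, b, atol=eps)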