def check_generic_gaussian_blur(batch_size,
                                sigma,
                                window_size,
                                shape,
                                layout,
                                axes,
                                op_type="cpu",
                                in_dtype=np.uint8,
                                out_dtype=types.NO_TYPE,
                                random_shape=True):
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    min_shape = None if random_shape else shape
    data = RandomlyShapedDataIterator(batch_size,
                                      min_shape=min_shape,
                                      max_shape=shape,
                                      dtype=in_dtype)
    # Derive the expected numpy result type: the input type is kept unless a
    # different output dtype is requested, in which case float32 is used.
    if out_dtype == types.NO_TYPE:
        result_type = in_dtype
    elif dali_type(in_dtype) == out_dtype:
        result_type = in_dtype
    else:
        result_type = np.float32
    with pipe:
        input = fn.external_source(data, layout=layout)
        if op_type == "gpu":
            input = input.gpu()
        blurred = fn.gaussian_blur(input,
                                   device=op_type,
                                   sigma=sigma,
                                   window_size=window_size,
                                   dtype=out_dtype)
        pipe.set_outputs(blurred, input)
    pipe.build()

    for _ in range(test_iters):
        result, input = pipe.run()
        if op_type == "gpu":
            result = result.as_cpu()
            input = input.as_cpu()
        input = to_batch(input, batch_size)
        skip_axes = count_skip_axes(layout)
        baseline = [
            gaussian_baseline(img,
                              sigma,
                              window_size,
                              axes,
                              skip_axes,
                              dtype=result_type) for img in input
        ]
        max_error = 1 if result_type != np.float32 else 1e-04
        check_batch(result,
                    baseline,
                    batch_size,
                    max_allowed_error=max_error,
                    expected_layout=layout)
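A hypothetical invocation of the check above (RandomlyShapedDataIterator, gaussian_baseline, count_skip_axes, to_batch and the module-level test_iters are assumed to be provided by the surrounding test utilities):

def test_gaussian_blur_example():
    # A minimal sketch: blur random HWC uint8 images on the GPU and
    # compare them with the reference implementation; `shape` is the
    # maximum sample shape drawn by the data iterator.
    check_generic_gaussian_blur(batch_size=4,
                                sigma=1.5,
                                window_size=5,
                                shape=(128, 128, 3),
                                layout="HWC",
                                axes=2,
                                op_type="gpu")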
Example #2
def check_coin_flip(device='cpu',
                    batch_size=32,
                    max_shape=[1e5],
                    p=None,
                    use_shape_like_input=False):
    pipe = Pipeline(batch_size=batch_size,
                    device_id=0,
                    num_threads=3,
                    seed=123456)
    with pipe:

        def shape_gen_f():
            return random_shape(max_shape)

        shape_arg = None
        inputs = []
        shape_out = None
        if max_shape is not None:
            if use_shape_like_input:
                shape_like_in = dali.fn.external_source(
                    lambda: np.zeros(shape_gen_f()),
                    device=device,
                    batch=False)
                inputs += [shape_like_in]
                shape_out = dali.fn.shapes(shape_like_in)
            else:
                shape_arg = dali.fn.external_source(shape_gen_f, batch=False)
                shape_out = shape_arg
        outputs = [
            dali.fn.random.coin_flip(*inputs,
                                     device=device,
                                     probability=p,
                                     shape=shape_arg)
        ]
        if shape_out is not None:
            outputs += [shape_out]
        pipe.set_outputs(*outputs)
    pipe.build()
    outputs = pipe.run()
    data_out = outputs[0].as_cpu() if isinstance(outputs[0],
                                                 TensorListGPU) else outputs[0]
    shapes_out = None
    if max_shape is not None:
        shapes_out = outputs[1].as_cpu() if isinstance(
            outputs[1], TensorListGPU) else outputs[1]
    p = p if p is not None else 0.5
    for i in range(batch_size):
        data = np.array(data_out[i])
        assert np.logical_or(data == 0, data == 1).all()
        if max_shape is not None:
            sample_shape = np.array(shapes_out[i])
            assert (data.shape == sample_shape).all()
            total = len(data)
            positive = np.count_nonzero(data)
            np.testing.assert_allclose(p, positive / total,
                                       atol=0.005)  # +/- 0.5%
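A hypothetical driver for this check (the probability values are arbitrary; random_shape is assumed to be a helper from the same module):

def test_coin_flip_example():
    # Minimal sketch exercising both shape-specification paths.
    check_coin_flip(device='cpu', p=0.3)
    check_coin_flip(device='gpu', p=0.7, use_shape_like_input=True)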
Example #3
def test_mxnet_reader_cpu():
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=None)
    out, _ = fn.readers.mxnet(path=os.path.join(recordio_dir, "train.rec"),
                              index_path=os.path.join(recordio_dir,
                                                      "train.idx"),
                              shard_id=0,
                              num_shards=1)
    check_no_input(fn.readers.mxnet,
                   path=os.path.join(recordio_dir, "train.rec"),
                   index_path=os.path.join(recordio_dir, "train.idx"),
                   shard_id=0,
                   num_shards=1)
Example #4
def test_move_to_device_end():
    test_data_shape = [1, 3, 0, 4]

    def get_data():
        out = [
            np.empty(test_data_shape, dtype=np.uint8)
            for _ in range(batch_size)
        ]
        return out

    pipe = Pipeline(batch_size=batch_size, num_threads=3, device_id=None)
    outs = fn.external_source(source=get_data)
    pipe.set_outputs(outs.gpu())
    assert_raises(
        RuntimeError,
        pipe.build,
        glob=
        'Cannot move the data node __ExternalSource_0 to the GPU in a CPU-only pipeline. '
        'The `device_id` parameter is set to `CPU_ONLY_DEVICE_ID`. '
        'Set `device_id` to a valid GPU identifier to enable GPU features in the pipeline.'
    )
Example #5
def test_set_outputs_err_msg_random_type():
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=None)
    pipe.set_outputs("test")
    with assert_raises(TypeError,
                       glob='Illegal output type. '
                            'The output * is a `<class \'str\'>`.'):
        pipe.build()
Example #6
def get_pipeline(batch_size=4,
                 in_size=None,
                 out_size=None,
                 even_paste_count=False,
                 k=4,
                 dtype=types.UINT8,
                 no_intersections=True,
                 full_input=False,
                 in_anchor_top_left=False,
                 out_anchor_top_left=False,
                 use_gpu=False,
                 num_out_of_bounds=0):
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    with pipe:
        input, _ = fn.readers.file(file_root=img_dir)
        decoded = fn.decoders.image(input, device='cpu', output_type=types.RGB)
        resized = fn.resize(decoded, resize_x=in_size[1], resize_y=in_size[0])
        in_idx_l, in_anchors_l, shapes_l, out_anchors_l = prepare_cuts(
            k, batch_size, in_size, out_size, even_paste_count,
            no_intersections, full_input, in_anchor_top_left,
            out_anchor_top_left, num_out_of_bounds)
        in_idx = fn.external_source(lambda: in_idx_l)
        in_anchors = fn.external_source(lambda: in_anchors_l)
        shapes = fn.external_source(lambda: shapes_l)
        out_anchors = fn.external_source(lambda: out_anchors_l)
        kwargs = {"in_ids": in_idx, "output_size": out_size, "dtype": dtype}

        if not full_input:
            kwargs["shapes"] = shapes

        if not in_anchor_top_left:
            kwargs["in_anchors"] = in_anchors

        if not out_anchor_top_left:
            kwargs["out_anchors"] = out_anchors

        pasted = fn.multi_paste(resized.gpu() if use_gpu else resized,
                                **kwargs)
        pipe.set_outputs(pasted, resized)
    return pipe, in_idx_l, in_anchors_l, shapes_l, out_anchors_l
Example #7
def get_pipeline(folder="train", custom_reader=None):
    pipe = Pipeline(batch_size=64, num_threads=1, device_id=1)

    if custom_reader:
        raw_files, labels = custom_reader
    else:
        raw_files, labels = fn.file_reader(file_root="%s" % folder,
                                           random_shuffle=True)

    # Decode to RGB: fn.hsv below expects a 3-channel input.
    decode = fn.image_decoder(raw_files, device="mixed", output_type=types.RGB)
    resize = fn.resize(decode, device="gpu",
                       interp_type=types.INTERP_LINEAR, resize_x=WIDTH, resize_y=HEIGHT)

    hsv = fn.hsv(resize, hue=fn.uniform(range=(-10, 10)), saturation=fn.uniform(range=(-.5, .5)),
                 value=fn.uniform(range=(0.9, 1.2)), device="gpu", dtype=types.UINT8)
    bc = fn.brightness_contrast(hsv, device="gpu", brightness=fn.uniform(range=(.9, 1.1)))

    cmn = fn.crop_mirror_normalize(bc, device="gpu", output_dtype=types.FLOAT,
                                   output_layout=types.NHWC,
                                   mean=[255 // 2],
                                   std=[255 // 2])

    rot = fn.rotate(cmn, angle=fn.uniform(range=(-40, 40)), device="gpu", keep_size=True)

    tpose = fn.transpose(rot, perm=(2, 0, 1), device="gpu")  # HWC -> CHW, the layout PyTorch expects

    pipe.set_outputs(tpose, labels)
    pipe.build()

    dali_iter = DALIClassificationIterator([pipe], -1)

    return dali_iter
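The returned iterator can then be consumed like any other PyTorch data source; a minimal sketch (with size=-1 the iterator has no epoch boundary, so the loop stops itself):

dali_iter = get_pipeline(folder="train")
for i, batch in enumerate(dali_iter):
    images = batch[0]["data"]    # NCHW float tensors, after the transpose above
    labels = batch[0]["label"]
    if i == 10:                  # no epoch boundary with size=-1; stop manually
        break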
Example #8
def get_pipeline(batch_size,
                 num_threads,
                 device,
                 device_id=0,
                 shard_id=0,
                 num_shards=1):
    test_data_root = os.environ['DALI_EXTRA_PATH']
    file_root = os.path.join(test_data_root, 'db', 'coco_dummy', 'images')
    annotations_file = os.path.join(test_data_root, 'db', 'coco_dummy',
                                    'instances.json')

    pipe = Pipeline(batch_size, num_threads, device_id)
    with pipe:
        jpegs, _, _, image_ids = fn.coco_reader(
            file_root=file_root,
            annotations_file=annotations_file,
            shard_id=shard_id,
            num_shards=num_shards,
            ratio=False,
            image_ids=True)
        images = fn.image_decoder(
            jpegs,
            device=('mixed' if device == 'gpu' else 'cpu'),
            output_type=types.RGB)
        images = fn.resize(images,
                           resize_x=224,
                           resize_y=224,
                           interp_type=types.INTERP_LINEAR)
        images = fn.crop_mirror_normalize(images,
                                          dtype=types.FLOAT,
                                          mean=[128., 128., 128.],
                                          std=[1., 1., 1.])
        if device == 'gpu':
            image_ids = image_ids.gpu()
        ids_reshaped = fn.reshape(image_ids, shape=[1, 1])
        ids_int16 = fn.cast(image_ids, dtype=types.INT16)

        pipe.set_outputs(images, ids_reshaped, ids_int16)

    return pipe
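A hypothetical run of this pipeline (DALI_EXTRA_PATH must point to a checkout of the DALI_extra test data):

pipe = get_pipeline(batch_size=8, num_threads=2, device='cpu')
pipe.build()
images, ids_reshaped, ids_int16 = pipe.run()
assert ids_reshaped.at(0).shape == (1, 1)   # each image id reshaped to 1x1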
Example #9
def check_dim_mismatch(device, test_data_root, names):
    pipe = Pipeline(2, 2, 0)
    pipe.set_outputs(
        fn.numpy_reader(device=device, file_root=test_data_root, files=names))
    pipe.build()
    err = None
    try:
        pipe.run()
    except RuntimeError as thrown:
        err = thrown
    # asserts should not be in except block to avoid printing nested exception on failure
    assert err, "Exception not thrown"
    assert "Inconsistent data" in str(
        err), "Unexpected error message: {}".format(err)
Example #10
def _test_external_source_callback_split(use_fn_api, batch, as_tensor, device):
    iter_num = 5
    batch_size = 9
    pipe = Pipeline(batch_size, 3, 0)

    # this should produce a two-element list of Tensor(Lists), the first
    # being 2D, the second being 3D (+ batch dimension)
    source = TestIterator(iter_num, batch_size, [2, 3], as_tensor)
    iter_in = iter(source) if batch else iter(
        SampleIterator(iter(source), True))

    if use_fn_api:
        inputs = fn.external_source(lambda: next(iter_in),
                                    2,
                                    device=device,
                                    batch=batch)
    else:
        ext_source = ops.ExternalSource(lambda: next(iter_in),
                                        num_outputs=2,
                                        device=device,
                                        batch=batch)
        inputs = ext_source()
    pipe.set_outputs(*inputs)
    pipe.build()

    run_and_check(pipe, source)
Example #11
def check_transform_scale_op(scale,
                             center=None,
                             has_input=False,
                             reverse_order=False,
                             batch_size=1,
                             num_threads=4,
                             device_id=0):
    ndim = len(scale)
    assert center is None or len(center) == ndim

    pipe = Pipeline(batch_size=batch_size,
                    num_threads=num_threads,
                    device_id=device_id)
    with pipe:
        if has_input:
            T0 = fn.uniform(range=(-1, 1), shape=(ndim, ndim + 1), seed=1234)
            T1 = fn.transforms.scale(T0,
                                     device='cpu',
                                     scale=scale,
                                     center=center,
                                     reverse_order=reverse_order)
            pipe.set_outputs(T1, T0)
        else:
            T1 = fn.transforms.scale(device='cpu', scale=scale, center=center)
            pipe.set_outputs(T1)
    pipe.build()
    outs = pipe.run()
    ref_mat = scale_affine_mat(scale=scale, center=center)
    T0 = outs[1] if has_input else None
    check_results(outs[0], batch_size, ref_mat, T0, reverse_order)
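Hypothetical parametrizations of this check (scale_affine_mat and check_results are assumed helpers from the same test module):

def test_transform_scale_example():
    # 2D scaling about the origin
    check_transform_scale_op(scale=(2.0, 0.5))
    # 3D scaling about a custom center, composed with a random input transform
    check_transform_scale_op(scale=(2.0, 0.5, 1.5),
                             center=(1.0, 0.0, -1.0),
                             has_input=True,
                             reverse_order=True)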
Example #12
def check_container(cont):
    pipe = Pipeline(batch_size=1, num_threads=4, device_id=0)
    path = os.path.join(video_containers_data_root, cont)
    test_videos = [path + '/' + f for f in os.listdir(path)]
    with pipe:
        # mkv container for some reason fails in DALI VFR heuristics
        vid = fn.video_reader(device="gpu",
                              filenames=test_videos,
                              sequence_length=10,
                              skip_vfr_check=True,
                              stride=1,
                              name="Reader")
        pipe.set_outputs(vid)
    pipe.build()

    iter_num = pipe.reader_meta("Reader")["epoch_size"]
    for _ in range(iter_num):
        pipe.run()
Example #13
def test_python_function_cpu():
    def resize(image):
        return np.array(Image.fromarray(image).resize((50, 10)))

    pipe = Pipeline(batch_size=batch_size,
                    num_threads=4,
                    device_id=None,
                    exec_async=False,
                    exec_pipelined=False)
    check_single_input(fn.python_function,
                       function=resize,
                       exec_async=False,
                       exec_pipelined=False)
Example #14
def test_external_source_iterate_ndarray():
    pipe = Pipeline(4, 3, 0)

    batch = make_array([1.5, 2.5, 2, 3], dtype=datapy.float32)

    pipe.set_outputs(fn.external_source(batch, batch=False))
    pipe.build()
    run_and_check(pipe, [batch])
Example #15
def check_corrupted_videos():
    corrupted_videos = [
        corrupted_video_data_root + '/' + f
        for f in os.listdir(corrupted_video_data_root)
    ]
    for corrupted in corrupted_videos:
        pipe = Pipeline(batch_size=BATCH_SIZE, num_threads=4, device_id=0)
        with pipe:
            vid = fn.video_reader(device="gpu",
                                  filenames=corrupted,
                                  sequence_length=1)
            pipe.set_outputs(vid)
        pipe.build()
Example #16
def test_set_outputs_err_msg_unpack():
    data = [[[np.random.rand(1, 3, 2)], [np.random.rand(1, 4, 5)]]]
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=None)
    pipe.set_outputs(fn.external_source(data, num_outputs=2, cycle='quiet'))
    with assert_raises(TypeError,
                       glob='Illegal pipeline output type. '
                            'The output * contains a nested `DataNode`'):
        pipe.build()
Example #17
def check_transform_rotation_op(angle,
                                axis=None,
                                center=None,
                                has_input=False,
                                reverse_order=False,
                                batch_size=1,
                                num_threads=4,
                                device_id=0):
    assert axis is None or len(axis) == 3
    ndim = 3 if axis is not None else 2
    assert center is None or len(center) == ndim

    pipe = Pipeline(batch_size=batch_size,
                    num_threads=num_threads,
                    device_id=device_id)
    with pipe:
        if has_input:
            T0 = fn.uniform(range=(-1, 1), shape=(ndim, ndim + 1), seed=1234)
            T1 = fn.transforms.rotation(T0,
                                        device='cpu',
                                        angle=angle,
                                        axis=axis,
                                        center=center,
                                        reverse_order=reverse_order)
            pipe.set_outputs(T1, T0)
        else:
            T1 = fn.transforms.rotation(device='cpu',
                                        angle=angle,
                                        axis=axis,
                                        center=center)
            pipe.set_outputs(T1)
    pipe.build()
    outs = pipe.run()
    ref_mat = rotate_affine_mat(angle=angle, axis=axis, center=center)
    T0 = outs[1] if has_input else None
    check_results(outs[0], batch_size, ref_mat, T0, reverse_order, rtol=1e-5)
Example #18
def check_per_sample_gaussian_blur(batch_size,
                                   sigma_dim,
                                   window_size_dim,
                                   shape,
                                   layout,
                                   axes,
                                   op_type="cpu"):
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    data = RandomlyShapedDataIterator(batch_size, max_shape=shape)
    with pipe:
        if sigma_dim is not None:
            sigma = fn.uniform(range=[0.5, 3], shape=[sigma_dim])
            sigma_arg = sigma
        else:
            # placeholder, so we can return something
            sigma = fn.coin_flip(probability=0)
            sigma_arg = None

        if window_size_dim is not None:
            window_radius = fn.uniform(range=[5, 10], shape=[window_size_dim])
            window_size = fn.cast(window_radius, dtype=types.INT32) * 2 + 1
            window_arg = window_size
        else:
            window_size = fn.coin_flip(probability=0)
            window_arg = None

        input = fn.external_source(data, layout=layout)
        if op_type == "gpu":
            input = input.gpu()
        blurred = fn.gaussian_blur(input,
                                   device=op_type,
                                   sigma=sigma_arg,
                                   window_size=window_arg)
        pipe.set_outputs(blurred, input, sigma, window_size)
    pipe.build()

    for _ in range(test_iters):
        result, input, sigma, window_size = pipe.run()
        if op_type == "gpu":
            result = result.as_cpu()
            input = input.as_cpu()
        input = to_batch(input, batch_size)
        sigma = to_batch(sigma, batch_size)
        window_size = to_batch(window_size, batch_size)
        baseline = []
        for i in range(batch_size):
            sigma_arg = sigma[i] if sigma_dim is not None else None
            window_arg = window_size[i] if window_size_dim is not None else None
            skip_axes = count_skip_axes(layout)
            baseline.append(
                gaussian_baseline(input[i], sigma_arg, window_arg, axes,
                                  skip_axes))
        check_batch(result, baseline, batch_size, max_allowed_error=1)
Example #19
def test_external_source_generator():
    pipe = Pipeline(1, 3, 0)

    def gen():
        for i in range(5):
            yield [make_array([i + 1.5], dtype=datapy.float32)]

    pipe.set_outputs(fn.external_source(gen()))
    pipe.build()

    for i in range(5):
        check_output(pipe.run(), [np.array([i + 1.5], dtype=np.float32)])
Example #20
def test_dtype_arg():
    batch_size = 2
    src_data = [[np.ones((120, 120, 3), dtype=np.uint8)] * batch_size]
    src_pipe = Pipeline(batch_size, 1, 0)
    src_ext = fn.external_source(source=src_data, dtype=DALIDataType.UINT8)
    src_pipe.set_outputs(src_ext)
    src_pipe.build()
    out, = src_pipe.run()
    for i in range(batch_size):
        t = out.at(i)
        assert t.dtype == np.uint8
        assert np.array_equal(t, np.ones((120, 120, 3), dtype=np.uint8))
Example #21
def NumpyReaderPipeline(path,
                        batch_size,
                        device="cpu",
                        file_list=None,
                        files=None,
                        path_filter="*.npy",
                        num_threads=1,
                        device_id=0,
                        num_gpus=1,
                        cache_header_information=False):
    pipe = Pipeline(batch_size=batch_size,
                    num_threads=num_threads,
                    device_id=device_id)
    data = fn.numpy_reader(device=device,
                           file_list=file_list,
                           files=files,
                           file_root=path,
                           file_filter=path_filter,
                           shard_id=0,
                           num_shards=1,
                           cache_header_information=cache_header_information)
    pipe.set_outputs(data)
    return pipe
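A hypothetical use of this factory (the directory path is a placeholder):

pipe = NumpyReaderPipeline(path="/data/npy_files", batch_size=8)
pipe.build()
out, = pipe.run()
print(out.at(0).shape)   # shape of the first sample in the batch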
Example #22
class DaliChecker:
    def __init__(self, batch_size, prefetch=2, device="mixed", device_id=0):
        log.debug("making checker")
        self.batch_size = batch_size
        self.prefetch = prefetch
        self.device = device
        self.device_id = device_id
        self.make_pipe()
        self.pipe.build()

    def make_pipe(self):
        log.debug("making pipe")
        self.pipe = Pipeline(batch_size=self.batch_size,
                             num_threads=2,
                             device_id=self.device_id,
                             prefetch_queue_depth=self.prefetch)
        with self.pipe:
            self.files = fn.external_source()
            images = fn.image_decoder(self.files, device=self.device)
            self.pipe.set_outputs(images)

    def feed(self, images):
        self.pipe.feed_input(self.files, images)
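A hypothetical feeding loop (the JPEG paths are placeholders; the prefetch queue is filled before the first run, one feed per queued iteration):

checker = DaliChecker(batch_size=2, prefetch=2, device="mixed")
encoded = [np.fromfile(p, dtype=np.uint8)
           for p in ("img0.jpg", "img1.jpg")]   # hypothetical files
for _ in range(checker.prefetch):               # fill the prefetch queue
    checker.feed(encoded)
images, = checker.pipe.run()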
Example #23
def test_python_function_cpu():
    def resize(image):
        return np.array(Image.fromarray(image).resize((50, 10)))

    pipe = Pipeline(batch_size=batch_size,
                    num_threads=4,
                    device_id=None,
                    exec_async=False,
                    exec_pipelined=False)
    with pipe:
        data = fn.external_source(source=get_data, layout="HWC")
        processed = fn.python_function(data, function=resize)
        pipe.set_outputs(processed)
    pipe.build()
    for _ in range(3):
        pipe.run()
Example #24
def check_transform_shear_op(shear=None,
                             angles=None,
                             center=None,
                             has_input=False,
                             reverse_order=False,
                             batch_size=1,
                             num_threads=4,
                             device_id=0):
    assert shear is not None or angles is not None
    if shear is not None:
        assert len(shear) == 2 or len(shear) == 6
        ndim = 3 if len(shear) == 6 else 2
    else:
        assert len(angles) == 2 or len(angles) == 6
        ndim = 3 if len(angles) == 6 else 2
    assert center is None or len(center) == ndim

    pipe = Pipeline(batch_size=batch_size,
                    num_threads=num_threads,
                    device_id=device_id,
                    seed=1234)
    with pipe:
        if has_input:
            T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1))
            T1 = fn.transforms.shear(T0,
                                     device='cpu',
                                     shear=shear,
                                     angles=angles,
                                     center=center,
                                     reverse_order=reverse_order)
            pipe.set_outputs(T1, T0)
        else:
            T1 = fn.transforms.shear(device='cpu',
                                     shear=shear,
                                     angles=angles,
                                     center=center)
            pipe.set_outputs(T1)
    pipe.build()
    outs = pipe.run()
    ref_mat = shear_affine_mat(shear=shear, angles=angles, center=center)
    T0 = outs[1] if has_input else None
    check_results(outs[0], batch_size, ref_mat, T0, reverse_order, atol=1e-6)
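Hypothetical parametrizations (shear_affine_mat and check_results are assumed helpers defined alongside this check):

def test_transform_shear_example():
    # 2D shear given as shear factors
    check_transform_shear_op(shear=(0.5, 1.0))
    # 3D shear given as six shear angles, about a custom center
    check_transform_shear_op(angles=(30., 10., 0., 0., -20., 5.),
                             center=(0.5, 0.5, 0.5),
                             has_input=True)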
Example #25
def test_external_source_collection():
    pipe = Pipeline(1, 3, 0)

    batches = [[make_array([1.5, 2.5], dtype=datapy.float32)],
               [make_array([-1, 3.5, 4.5], dtype=datapy.float32)]]

    pipe.set_outputs(fn.external_source(batches))
    pipe.build()
    run_and_check(pipe, batches)
Example #26
def test_external_source_collection_cycling():
    pipe = Pipeline(1, 3, 0)

    batches = [[np.array([1.5, 2.5], dtype=np.float32)],
               [np.array([-1, 3.5, 4.5], dtype=np.float32)]]

    pipe.set_outputs(fn.external_source(batches, cycle=True))
    pipe.build()

    # epochs are cycles over the source iterable
    for epoch in range(3):
        for batch in batches:
            check_output(pipe.run(), batch)
Example #27
def test_external_source_gen_function_cycle():
    pipe = Pipeline(1, 3, 0)

    def gen():
        for i in range(5):
            yield [make_array([i + 1.5], dtype=datapy.float32)]

    pipe.set_outputs(fn.external_source(gen, cycle=True))
    pipe.build()

    for _ in range(3):
        for i in range(5):
            check_output(pipe.run(), [np.array([i + 1.5], dtype=np.float32)])
Example #28
def test_element_extract_cpu():
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=None)
    test_data_shape = [5, 10, 20, 3]

    def get_data():
        out = [
            np.random.randint(0, 255, size=test_data_shape, dtype=np.uint8)
            for _ in range(batch_size)
        ]
        return out

    data = fn.external_source(source=get_data, layout="FHWC")
    processed, _ = fn.element_extract(data, element_map=[0, 3])
    pipe.set_outputs(processed)
    pipe.build()
    for _ in range(3):
        pipe.run()
Example #29
def test_sequence_rearrange_cpu():
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=None)
    test_data_shape = [5, 10, 20, 3]

    def get_data():
        out = [
            np.random.randint(0, 255, size=test_data_shape, dtype=np.uint8)
            for _ in range(batch_size)
        ]
        return out

    data = fn.external_source(source=get_data, layout="FHWC")
    processed = fn.sequence_rearrange(data, new_order=[0, 4, 1, 3, 2])
    pipe.set_outputs(processed)
    pipe.build()
    for _ in range(3):
        pipe.run()
Example #30
def check_deserialization_with_params(batch_size, num_threads, shape):
    init_pipe = TestPipeline(batch_size=batch_size,
                             num_threads=num_threads,
                             shape=shape)
    serialized = init_pipe.serialize()
    ref_pipe = TestPipeline(batch_size=batch_size**2,
                            num_threads=num_threads + 1,
                            shape=shape)
    test_pipe = Pipeline.deserialize(serialized,
                                     batch_size=batch_size**2,
                                     num_threads=num_threads + 1)
    test_utils.compare_pipelines(ref_pipe,
                                 test_pipe,
                                 batch_size=batch_size**2,
                                 N_iterations=3)
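A hypothetical invocation (TestPipeline and test_utils.compare_pipelines are assumed to come from the surrounding test module):

check_deserialization_with_params(batch_size=4, num_threads=2, shape=(100, 100))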