Example #1
 def load_data(self, features):
     img = fn.reshape(features["X"],
                      shape=features["X_shape"],
                      layout=self.layout)
     lbl = fn.reshape(features["Y"],
                      shape=features["Y_shape"],
                      layout=self.layout)
     lbl = fn.reinterpret(lbl, dtype=types.DALIDataType.UINT8)
     return img, lbl
Example #2
 def load_data(self):
     img = self.input_x(name="ReaderX")
     if self.load_to_gpu:
         img = img.gpu()
     img = fn.reshape(img, layout="CDHW")
     if self.input_y is not None:
         lbl = self.input_y(name="ReaderY")
         if self.load_to_gpu:
             lbl = lbl.gpu()
         lbl = fn.reshape(lbl, layout="CDHW")
         return img, lbl
     return img
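The loaders above follow the same pattern: a flat buffer produced by a reader is reassembled by fn.reshape, either from a separately stored shape tensor or just by attaching a layout string, so that downstream operators see a named "CDHW" volume. Below is a minimal stand-alone sketch of that pattern; it is not taken from the snippets above, and the sources, shapes, and CPU-only pipeline settings are illustrative assumptions.

import numpy as np
from nvidia.dali import pipeline_def, fn

@pipeline_def(batch_size=1, num_threads=1, device_id=None)  # device_id=None: CPU-only pipeline
def reshape_with_layout_pipe():
    # Stand-ins for the "X" / "X_shape" features: a flat buffer and its target extents.
    flat = fn.external_source(
        source=lambda: [np.arange(2 * 3 * 4 * 5, dtype=np.float32)], batch=True)
    shape = fn.external_source(
        source=lambda: [np.array([2, 3, 4, 5], dtype=np.int64)], batch=True)
    # Reassemble the buffer and name its dimensions so later operators see a "CDHW" volume.
    return fn.reshape(flat, shape=shape, layout="CDHW")

pipe = reshape_with_layout_pipe()
pipe.build()
out, = pipe.run()
print(out[0].shape())  # (2, 3, 4, 5)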
Example #3
    def define_graph(self):
        inputs, bboxes, labels, polygons, vertices = fn.readers.coco(
                                            file_root=self.file_root,
                                            annotations_file=self.annotation_file,
                                            skip_empty=True,
                                            shard_id=self.share_id,
                                            num_shards=self.num_gpus,
                                            ratio=True,
                                            ltrb=True,
                                            polygon_masks = True,
                                            random_shuffle=self.random_shuffle,
                                            shuffle_after_epoch=self.shuffle_after_epoch,
                                            name="Reader")

        input_shape = fn.slice(fn.cast(fn.peek_image_shape(inputs), dtype=types.INT32), 0, 2, axes=[0])
        h = fn.slice(input_shape, 0, 1, axes=[0], dtype=types.FLOAT)
        w = fn.slice(input_shape, 1, 1, axes=[0], dtype=types.FLOAT)
        short_side = math.min(w, h)
        scale = fn.random.uniform(range=[0.3, 1.])
        crop_side = fn.cast(math.ceil(scale * short_side), dtype=types.INT32)    
        crop_shape = fn.cat(crop_side, crop_side)
        anchor_rel, shape_rel, bboxes, labels, bbox_indices = fn.random_bbox_crop(
                        bboxes,
                        labels,
                        input_shape=input_shape,
                        crop_shape=crop_shape,
                        shape_layout="HW",
                        thresholds=[0.],            # No minimum intersection-over-union, for demo purposes
                        allow_no_crop=False,        # No-crop is disallowed, for demo purposes 
                        seed=-1,                    # seed=-1: DALI picks the seed automatically
                        bbox_layout="xyXY",         # left, top, right, bottom
                        output_bbox_indices=True,   # Output indices of the filtered bounding boxes
                        total_num_attempts=1024,
        )
        polygons, vertices = fn.segmentation.select_masks(
            bbox_indices, polygons, vertices
        )
        images = fn.decoders.image_slice(
            inputs, anchor_rel, shape_rel, normalized_anchor=False, normalized_shape=False, device='mixed'
        )
        images = fn.color_space_conversion(images, image_type=types.RGB, output_type=types.BGR)
        MT_1_vertices = fn.transforms.crop(
            to_start=(0.0, 0.0), to_end=fn.cat(w, h)
        )    
        MT_2_vertices = fn.transforms.crop(
            from_start=anchor_rel, from_end=(anchor_rel + shape_rel),
            to_start=(0.0, 0.0), to_end=(1., 1.)
        )    
        vertices = fn.coord_transform(fn.coord_transform(vertices, MT=MT_1_vertices), MT=MT_2_vertices)    
        targets = fn.cat(bboxes, fn.reshape(vertices, shape=[-1, 10]), axis=1)

        interp_methods = [types.INTERP_LINEAR, types.INTERP_CUBIC, types.INTERP_LANCZOS3, types.INTERP_GAUSSIAN, types.INTERP_NN, types.INTERP_TRIANGULAR]
        interp_method = fn.random.uniform(values=[int(x) for x in interp_methods], dtype=types.INT32)
        interp_method = fn.reinterpret(interp_method, dtype=types.INTERP_TYPE)
        images = fn.resize(images, dtype=types.FLOAT, size=self.input_dim, interp_type=interp_method)

        labels = labels.gpu()
        targets = targets.gpu()
        return (images, targets, labels)
Example #4
    def define_graph(self):
        jpegs, dummy_labels = self.input()
        self.labels = self.label()
        self.crop_dim = self.crops()
        anchor = fn.reshape(fn.slice(self.crop_dim, 0, 2, axes=[1]), shape=[-1])
        shape = fn.reshape(fn.slice(self.crop_dim, 2, 2, axes=[1]), shape=[-1])
        anchor = self.cast(anchor)
        shape = self.cast(shape)
        images = self.decode(jpegs)
        images = self.res(images)


#       decode and slicing
        jpegs = fn.slice(jpegs, anchor, shape, axes=[0, 1], device='gpu')
        jpegs = self.res(jpegs)

        return (images, self.labels, self.crop_dim)
Example #5
 def pipeline():
     outputs = fn.external_source(  # noqa F841
         source=sample_cb_source,
         batch=False,
         num_outputs=num_outputs)
     data = fn.random.uniform(range=(0, 255), shape=(300, 100, 3))
     img = fn.reshape(data, layout="HWC")
     return fn.gaussian_blur(img, window_size=3)
Example #6
 def create_pipline():
     jpegs = fn.external_source(source=callback,
                                num_outputs=sequence_lenght * 2,
                                parallel=parallel,
                                batch=False)
     images = fn.decoders.image(jpegs, device="cpu")
     sequence = fn.stack(*images)
     sequence = fn.reshape(sequence, layout="DHWC")
     return sequence
Example #7
def make_param(kind, shape):
    if kind == "input":
        return fn.random.uniform(range=(0, 1), shape=shape)
    elif kind == "scalar input":
        return fn.reshape(fn.random.uniform(range=(0, 1)), shape=[])
    elif kind == "vector":
        return np.random.rand(*shape).astype(np.float32)
    elif kind == "scalar":
        return np.random.rand()
    else:
        return None
Example #8
def create_ref_pipe(channel_first, seq_len, interp, dtype, w, h, batch_size=2):
    pipe = dali.pipeline.Pipeline(batch_size, 1, 0, 0, exec_async=False, exec_pipelined=False)
    with pipe:
        layout = "FCHW" if channel_first else "FHWC"
        ext = fn.external_source(GetSequences(channel_first, seq_len, batch_size), layout=layout)
        pil_resized = fn.python_function(ext, function=resize_PIL(channel_first, interp, w, h),
                                         batch_processing=False)
        if dtype is not None:  # unfortunately, PIL can't quite handle that
            pil_resized = fn.cast(pil_resized, dtype=dtype)
        pil_resized = fn.reshape(pil_resized, layout=layout)
        pipe.set_outputs(pil_resized)
    return pipe
Example #9
def dali_frame_splicing_graph(x, nfeatures, x_len, stacking=1, subsampling=1):
    if stacking > 1:
        seq = [x]
        for n in range(1, stacking):
            f = fn.slice(x,
                         n,
                         x_len,
                         axes=(1, ),
                         out_of_bounds_policy='pad',
                         fill_values=0)
            seq.append(f)
        x = fn.cat(*seq, axis=0)
        nfeatures = nfeatures * stacking
    if subsampling > 1:
        out_len = (x_len + subsampling - 1) // subsampling
        m = fn.transforms.scale(scale=[subsampling, 1], center=[0.5, 0])
        x = fn.reshape(x, rel_shape=[1, 1, -1],
                       layout="HWC")  # Layout required by WarpAffine
        size = fn.cat(nfeatures, out_len)
        x = fn.warp_affine(x, matrix=m, size=size, interp_type=types.INTERP_NN)
        x = fn.reshape(x, rel_shape=[1, 1], layout="ft")
    return x
Example #10
def tfrecord_pipeline(dspath, batch_size, num_threads, device="cpu", device_id=None,
                        shard_id=0, num_shards=1, reader_name="Reader",
                        seq=True, chroms=False, chroms_vlog=False, target=True, target_vlog=True, label=False, random_shuffle=True):
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id)

    feature_description = {}
    feature_description["seq"] = tfrec.VarLenFeature(tfrec.float32, -1.0)
    feature_description["label"] = tfrec.FixedLenFeature([], tfrec.int64, -1)
    feature_description["target"] = tfrec.FixedLenFeature([], tfrec.float32, -1.0)
    for ct in dspath["chromatin_tracks"]:
        feature_description[ct] = tfrec.VarLenFeature(tfrec.float32, -1.0)

    with pipe:
        inputs = fn.readers.tfrecord(
            name=reader_name,
            path=dspath['TFRecord'],
            index_path=dspath['TFRecord_idx'],
            features=feature_description,
            shard_id=shard_id,
            num_shards=num_shards,
            random_shuffle=random_shuffle,
            read_ahead=True, 
            prefetch_queue_depth=20,
            pad_last_batch=True)
        if device=="gpu":
            inputs['seq'] = inputs['seq'].gpu()
            for ct in dspath["chromatin_tracks"]: inputs[ct] = inputs[ct].gpu()
            inputs['target'] = inputs['target'].gpu()
            inputs['label'] = inputs['label'].gpu()
        seqdata = fn.expand_dims(inputs['seq'], axes=1, device=device)
        seqdata = fn.reshape(seqdata, shape=(4, -1), device=device)
        chromsdata = fn.cat(*[fn.expand_dims(inputs[ct], axes=0, device=device) for ct in dspath["chromatin_tracks"]], axis=0, device=device)

        sample = []
        if seq:
            sample.append(seqdata)
        if chroms:
            if chroms_vlog:
                sample.append(log(chromsdata + 1))
            else:
                sample.append(chromsdata)
        if target:
            if target_vlog:
                sample.append(log(inputs['target'] + 1))
            else:
                sample.append(inputs['target'])
        if label:
            sample.append(inputs['label'])

        pipe.set_outputs(*sample)
    return pipe
Example #11
def get_image_pipeline(batch_size, num_threads, device, device_id=0, shard_id=0, num_shards=1,
        def_for_dataset=False):
    test_data_root = get_dali_extra_path()
    file_root = os.path.join(test_data_root, 'db', 'coco_dummy', 'images')
    annotations_file = os.path.join(
        test_data_root, 'db', 'coco_dummy', 'instances.json')

    pipe = Pipeline(batch_size, num_threads, device_id)
    with pipe:
        jpegs, _, _, image_ids = fn.readers.coco(
            file_root=file_root,
            annotations_file=annotations_file,
            shard_id=shard_id,
            num_shards=num_shards,
            ratio=False,
            image_ids=True)
        images = fn.decoders.image(
            jpegs,
            device=('mixed' if device == 'gpu' else 'cpu'),
            output_type=types.RGB)
        images = fn.resize(
            images,
            resize_x=224,
            resize_y=224,
            interp_type=types.INTERP_LINEAR)
        images = fn.crop_mirror_normalize(
            images,
            dtype=types.FLOAT,
            mean=[128., 128., 128.],
            std=[1., 1., 1.])
        if device == 'gpu':
            image_ids = image_ids.gpu()
        ids_reshaped = fn.reshape(image_ids, shape=[1, 1])
        ids_int16 = fn.cast(image_ids, dtype=types.INT16)

        pipe.set_outputs(images, ids_reshaped, ids_int16)

    shapes = (
        (batch_size, 3, 224, 224),
        (batch_size, 1, 1),
        (batch_size, 1))
    dtypes = (
        tf.float32,
        tf.int32,
        tf.int16)

    return pipe, shapes, dtypes
Example #12
 def pipe():
     image_like = fn.random.uniform(device=device,
                                    range=(0, 255),
                                    shape=(80, 120, 3))
     image_like = fn.reshape(image_like, layout="HWC")
     mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
     std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
     if rand_mean:
         mean = fn.random.uniform(range=(100, 125), shape=(3, ))
     if rand_stdev:
         std = fn.random.uniform(range=(55, 60), shape=(3, ))
     out = fn.crop_mirror_normalize(image_like,
                                    dtype=types.FLOAT,
                                    output_layout="HWC",
                                    mean=mean,
                                    std=std,
                                    scale=scale,
                                    shift=shift,
                                    pad_output=False)
     return out, image_like, mean, std
Example #13
def get_pipeline(batch_size,
                 num_threads,
                 device,
                 device_id=0,
                 shard_id=0,
                 num_shards=1):
    test_data_root = os.environ['DALI_EXTRA_PATH']
    file_root = os.path.join(test_data_root, 'db', 'coco_dummy', 'images')
    annotations_file = os.path.join(test_data_root, 'db', 'coco_dummy',
                                    'instances.json')

    pipe = Pipeline(batch_size, num_threads, device_id)
    with pipe:
        jpegs, _, _, image_ids = fn.coco_reader(
            file_root=file_root,
            annotations_file=annotations_file,
            shard_id=shard_id,
            num_shards=num_shards,
            ratio=False,
            image_ids=True)
        images = fn.image_decoder(
            jpegs,
            device=('mixed' if device == 'gpu' else 'cpu'),
            output_type=types.RGB)
        images = fn.resize(images,
                           resize_x=224,
                           resize_y=224,
                           interp_type=types.INTERP_LINEAR)
        images = fn.crop_mirror_normalize(images,
                                          dtype=types.FLOAT,
                                          mean=[128., 128., 128.],
                                          std=[1., 1., 1.])
        if device == 'gpu':
            image_ids = image_ids.gpu()
        ids_reshaped = fn.reshape(image_ids, shape=[1, 1])
        ids_int16 = fn.cast(image_ids, dtype=types.INT16)

        pipe.set_outputs(images, ids_reshaped, ids_int16)

    return pipe
Example #14
def check_pad_to_square(device='cpu', batch_size=3, ndim=2, num_iter=3):
    pipe = Pipeline(batch_size=batch_size,
                    num_threads=3,
                    device_id=0,
                    seed=1234)
    axes = (0, 1)
    with pipe:
        in_shape = fn.cast(fn.random.uniform(range=(10, 20), shape=(ndim, )),
                           dtype=types.INT32)
        in_data = fn.reshape(fn.random.uniform(range=(0., 1.), shape=in_shape),
                             layout="HW")
        shape = fn.shapes(in_data, dtype=types.INT32)
        h = fn.slice(shape, 0, 1, axes=[0])
        w = fn.slice(shape, 1, 1, axes=[0])
        side = math.max(h, w)
        if device == 'gpu':
            in_data = in_data.gpu()
        out_data = fn.pad(in_data,
                          axis_names="HW",
                          shape=fn.cat(side, side, axis=0))
        pipe.set_outputs(in_data, out_data)
    pipe.build()
    for _ in range(num_iter):
        outs = [
            out.as_cpu() if isinstance(out, TensorListGPU) else out
            for out in pipe.run()
        ]
        for i in range(batch_size):
            in_data, out_data = \
                [outs[out_idx].at(i) for out_idx in range(len(outs))]
            in_shape = in_data.shape
            max_side = max(in_shape)
            for s in out_data.shape:
                assert s == max_side
            np.testing.assert_equal(out_data[:in_shape[0], :in_shape[1]],
                                    in_data)
            np.testing.assert_equal(out_data[in_shape[0]:, :], 0)
            np.testing.assert_equal(out_data[:, in_shape[1]:], 0)
Example #15
        def create_image_pipeline(
            batch_size,
            num_threads,
            device_id,
            image0_list,
            image1_list,
            flow_list,
            valBool,
        ):
            pipeline = Pipeline(batch_size, num_threads, device_id, seed=2)
            with pipeline:
                shuffleBool = not valBool
                """ READ FILES """
                image0, _ = fn.readers.file(
                    file_root=args.data,
                    files=image0_list,
                    random_shuffle=shuffleBool,
                    name="Reader",
                    seed=1,
                )
                image1, _ = fn.readers.file(
                    file_root=args.data,
                    files=image1_list,
                    random_shuffle=shuffleBool,
                    seed=1,
                )
                flo = fn.readers.numpy(
                    file_root=args.data,
                    files=flow_list,
                    random_shuffle=shuffleBool,
                    seed=1,
                )
                """ DECODE AND RESHAPE """
                image0 = fn.decoders.image(image0, device="cpu")
                image0 = fn.reshape(image0, layout="HWC")
                image1 = fn.decoders.image(image1, device="cpu")
                image1 = fn.reshape(image1, layout="HWC")
                images = fn.cat(image0, image1, axis=2)
                flo = fn.reshape(flo, layout="HWC")

                if valBool:
                    images = fn.resize(images, resize_x=162, resize_y=122)
                else:
                    """ CO-TRANSFORM """
                    # random translate
                    # angle_rng = fn.random.uniform(range=(-90, 90))
                    # images = fn.rotate(images, angle=angle_rng, fill_value=0)
                    # flo = fn.rotate(flo, angle=angle_rng, fill_value=0)

                    images = fn.random_resized_crop(
                        images,
                        size=[122, 162],  # 122, 162
                        random_aspect_ratio=[1.3, 1.4],
                        random_area=[0.8, 0.9],
                        seed=1,
                    )
                    flo = fn.random_resized_crop(
                        flo,
                        size=[122, 162],
                        random_aspect_ratio=[1.3, 1.4],
                        random_area=[0.8, 0.9],
                        seed=1,
                    )

                    # coin1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=10)
                    # coin1_n = coin1 ^ True
                    # coin2 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=20)
                    # coin2_n = coin2 ^ True

                    # images = (
                    #     fn.flip(images, horizontal=1, vertical=1) * coin1 * coin2
                    #     + fn.flip(images, horizontal=1) * coin1 * coin2_n
                    #     + fn.flip(images, vertical=1) * coin1_n * coin2
                    #     + images * coin1_n * coin2_n
                    # )
                    # flo = (
                    #     fn.flip(flo, horizontal=1, vertical=1) * coin1 * coin2
                    #     + fn.flip(flo, horizontal=1) * coin1 * coin2_n
                    #     + fn.flip(flo, vertical=1) * coin1_n * coin2
                    #     + flo * coin1_n * coin2_n
                    # )
                    # _flo = flo
                    # flo_0 = fn.slice(_flo, axis_names="C", start=0, shape=1)
                    # flo_1 = fn.slice(_flo, axis_names="C", start=1, shape=1)
                    # flo_0 = flo_0 * coin1 * -1 + flo_0 * coin1_n
                    # flo_1 = flo_1 * coin2 * -1 + flo_1 * coin2_n
                    # # flo  = noflip + vertical flip + horizontal flip + both_flip

                    # # A horizontal flip is around the vertical axis (switch left and right)
                    # # So for a vertical flip coin1 is activated and needs to give +1, coin2 is activated needs to give -1
                    # # for a horizontal flip coin1 is activated and needs to be -1, coin2_n needs +1
                    # # no flip coin coin1_n +1, coin2_n +1

                    # flo = fn.cat(flo_0, flo_1, axis_name="C")
                """ NORMALIZE """
                images = fn.crop_mirror_normalize(
                    images,
                    mean=[0, 0, 0, 0, 0, 0],
                    std=[255, 255, 255, 255, 255, 255])
                images = fn.crop_mirror_normalize(
                    images,
                    mean=[0.45, 0.432, 0.411, 0.45, 0.432, 0.411],
                    std=[1, 1, 1, 1, 1, 1],
                )
                flo = fn.crop_mirror_normalize(
                    flo, mean=[0, 0], std=[args.div_flow, args.div_flow])

                pipeline.set_outputs(images, flo)
            return pipeline
Example #16
 def flip_1d(x):
     # TODO(janton): remove the layout trick when Flip supports arbitrary data layouts
     x = fn.reshape(x, shape=(-1, 1, 1), layout="HWC")
     x = fn.flip(x, vertical=1)
     x = fn.reshape(x, shape=(-1, ), layout="t")
     return x
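Because the flip_1d helper above only exists as part of a pipeline graph, here is a minimal CPU-only sketch (an illustration, not from the source) of the same reshape-flip-reshape trick executed end to end; the input values and pipeline settings are arbitrary assumptions.

import numpy as np
from nvidia.dali import pipeline_def, fn

@pipeline_def(batch_size=1, num_threads=1, device_id=None)  # device_id=None: CPU-only pipeline
def flip_1d_pipe():
    x = fn.external_source(source=lambda: [np.arange(8, dtype=np.float32)], batch=True)
    img = fn.reshape(x, shape=(-1, 1, 1), layout="HWC")  # view the 1-D tensor as an Hx1x1 image
    img = fn.flip(img, vertical=1)                       # reverse along H
    return fn.reshape(img, shape=(-1,), layout="t")      # restore the 1-D "t" layout

pipe = flip_1d_pipe()
pipe.build()
out, = pipe.run()
print(np.array(out[0]))  # [7. 6. 5. 4. 3. 2. 1. 0.]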
Example #17
 def pipeline():
     blob = fn.random.uniform(range=[0, 1], shape=(3, 200, 100))
     image = fn.reshape(blob, layout="CHW")
     per_channel = np.array([3, 5, 7])
     return fn.gaussian_blur(image, window_size=fn.per_frame(per_channel))
Example #18
def build_pipes(device, dim, batch_size, channel_first, mode, interp, dtype,
                w_input, h_input, d_input, use_size_arg, use_size_input,
                use_roi):
    dali_pipe = Pipeline(batch_size=batch_size,
                         num_threads=8,
                         device_id=0,
                         seed=1234)
    with dali_pipe:
        if dim == 2:
            files, labels = dali.fn.readers.caffe(path=db_2d_folder,
                                                  random_shuffle=True)
            images_cpu = dali.fn.decoders.image(files, device="cpu")
        else:
            images_cpu = dali.fn.external_source(
                source=random_3d_loader(batch_size), layout="DHWC")

        images_hwc = images_cpu if device == "cpu" else images_cpu.gpu()

        if channel_first:
            images = dali.fn.transpose(
                images_hwc,
                perm=[3, 0, 1, 2] if dim == 3 else [2, 0, 1],
                transpose_layout=True)
        else:
            images = images_hwc

        roi_start = None
        roi_end = None
        w = None
        h = None
        d = None
        size = None

        minibatch_size = 2 if dim == 3 else 8

        if use_roi:
            # Calculate absolute RoI
            in_size = fn.slice(fn.shapes(images_cpu),
                               types.Constant(0,
                                              dtype=types.FLOAT,
                                              device="cpu"),
                               types.Constant(dim,
                                              dtype=types.FLOAT,
                                              device="cpu"),
                               axes=[0],
                               normalized_shape=False)
            roi_start = fn.random.uniform(range=(0, 0.4), shape=[dim]) * in_size
            roi_end = fn.random.uniform(range=(0.6, 1.0), shape=[dim]) * in_size

        size_range = (10, 200) if dim == 3 else (10, 1000)

        if use_size_arg:
            if use_size_input:
                mask = fn.cast(fn.random.uniform(range=(0.8, 1.9),
                                                 shape=[dim]),
                               dtype=types.INT32)
                size = fn.random.uniform(range=size_range, shape=[dim]) * mask
            else:
                size = [300, 400] if dim == 2 else [80, 100, 120]

            resized = resize_dali(images,
                                  channel_first,
                                  dtype,
                                  interp,
                                  mode,
                                  size,
                                  None,
                                  None,
                                  None,
                                  roi_start,
                                  roi_end,
                                  minibatch_size=minibatch_size,
                                  max_size=max_size(dim))
        else:
            if w_input:
                has_w = fn.random.coin_flip(probability=0.8)
                w = fn.random.uniform(range=size_range) * has_w
            else:
                w = 320  # some fixed value

            if h_input:
                has_h = fn.random.coin_flip(probability=0.8)
                h = fn.random.uniform(range=size_range) * has_h
            else:
                h = 240  # some other fixed value

            if dim >= 3:
                if d_input:
                    has_d = fn.random.coin_flip(probability=0.8)
                    d = fn.random.uniform(range=size_range) * has_d
                else:
                    d = 31  # some other fixed value

            resized = resize_dali(images,
                                  channel_first,
                                  dtype,
                                  interp,
                                  mode,
                                  None,
                                  w,
                                  h,
                                  d,
                                  roi_start,
                                  roi_end,
                                  minibatch_size=minibatch_size,
                                  max_size=max_size(dim))

        outputs = [images, resized]
        if roi_start is not None and roi_end is not None:
            outputs += [roi_start, roi_end]

        for x in (d, h, w, size):
            if x is not None:
                if isinstance(x, _DataNode):
                    outputs.append(x)
                else:
                    outputs.append(
                        types.Constant(np.array(x, dtype=np.float32)))

        dali_pipe.set_outputs(*outputs)

    pil_pipe = Pipeline(batch_size=batch_size,
                        num_threads=8,
                        device_id=0,
                        exec_async=False,
                        exec_pipelined=False)
    with pil_pipe:
        images = fn.external_source(name="images",
                                    layout=layout_str(dim, channel_first))
        sizes = fn.external_source(name="size")
        roi_start = fn.external_source(name="roi_start")
        roi_end = fn.external_source(name="roi_end")
        resized = resize_PIL(dim, channel_first, dtype, interp, images, sizes,
                             roi_start, roi_end)
        resized = fn.reshape(resized, layout=layout_str(dim, channel_first))
        pil_pipe.set_outputs(resized)
    dali_pipe.build()
    pil_pipe.build()

    return dali_pipe, pil_pipe
Example #19
 def define_graph(self):
     features = self.input(name="Reader")
     img = fn.reshape(features["X"].gpu(), shape=features["X_shape"], layout="CDHW")
     return img, features["fname"]
 def load_data(self):
     img, lbl = self.input_x(name="ReaderX"), self.input_y(name="ReaderY")
     img, lbl = fn.reshape(img, layout="CDHW"), fn.reshape(lbl, layout="CDHW")
     return img, lbl
 def define_graph(self):
     img, meta = self.input_x(name="ReaderX").gpu(), self.input_meta(name="ReaderY").gpu()
     img = fn.reshape(img, layout="CDHW")
     return img, meta
 def define_graph(self):
     img, lbl = self.input_x(name="ReaderX").gpu(), self.input_y(name="ReaderY").gpu()
     img, lbl = fn.reshape(img, layout="CDHW"), fn.reshape(lbl, layout="CDHW")
     return img, lbl
def reshape_pipe(shapes, src_dims=None, rel_shape=None):
    data = fn.external_source(lambda: get_data(shapes), batch=True, device="cpu")
    return fn.reshape(data, src_dims=src_dims, rel_shape=rel_shape)
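For completeness, a minimal self-contained sketch of how a graph function like reshape_pipe is typically wrapped with pipeline_def and executed; the fixed input shape and the rel_shape value are illustrative assumptions rather than the original test's parameters.

import numpy as np
from nvidia.dali import pipeline_def, fn

@pipeline_def(batch_size=4, num_threads=2, device_id=None)  # device_id=None: CPU-only pipeline
def example_reshape_pipe():
    data = fn.external_source(
        source=lambda: [np.zeros((2, 3, 4), dtype=np.float32)] * 4, batch=True)
    # rel_shape keeps the first extent and merges the rest: (2, 3, 4) -> (2, 12).
    return fn.reshape(data, rel_shape=[1, -1])

pipe = example_reshape_pipe()
pipe.build()
out, = pipe.run()
print(out.as_array().shape)  # (4, 2, 12)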