def pipe(device, out_type, files):
    encoded, _ = fn.readers.file(files=files)
    decoded = fn.decoders.image(encoded,
                                device=device,
                                output_type=out_type)
    peeked_shape = fn.peek_image_shape(encoded)
    return decoded, peeked_shape
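# Usage sketch: the graph function above can be wrapped with DALI's
# `pipeline_def` decorator and run directly. `image_files` below is a
# hypothetical list of image paths, not part of the original snippet.
from nvidia.dali import pipeline_def, fn, types

@pipeline_def(batch_size=4, num_threads=4, device_id=0)
def peek_pipe(files):
    return pipe('cpu', types.RGB, files)

# p = peek_pipe(image_files)
# p.build()
# decoded, peeked_shapes = p.run()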
Example #2
def run_decode(data_path, out_type):
    batch_size = 4
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    input, _ = fn.file_reader(file_root=data_path,
                              shard_id=0,
                              num_shards=1,
                              name="reader")
    decoded = fn.image_decoder(input, output_type=types.RGB)
    decoded_shape = fn.shapes(decoded)
    raw_shape = fn.peek_image_shape(input, type=out_type)
    pipe.set_outputs(decoded, decoded_shape, raw_shape)
    pipe.build()
    samples = 0
    length = pipe.reader_meta(name="reader")['epoch_size']
    while samples < length:
        samples += batch_size
        (images, decoded_shape, raw_shape) = pipe.run()
        for i in range(batch_size):
            # the requested color space may differ from the source image's, so skip comparing the channel dimension
            image = images.at(i)
            shape_type = dali_types_to_np(out_type)
            for d in range(len(image.shape) - 1):
                assert image.shape[d] == decoded_shape.at(
                    i)[d], "{} vs {}".format(image.shape[d],
                                             decoded_shape.at(i)[d])
                assert image.shape[d] == raw_shape.at(i)[d], "{} vs {}".format(
                    image.shape[d],
                    raw_shape.at(i)[d])
                assert raw_shape.at(
                    i)[d].dtype == shape_type, "{} vs {}".format(
                        raw_shape.at(i)[d].dtype, shape_type)
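# The test above assumes a `dali_types_to_np` helper defined elsewhere in the
# test module. A minimal sketch, covering the integer and float types commonly
# requested from peek_image_shape (mapping chosen here for illustration):
import numpy as np
from nvidia.dali import types

def dali_types_to_np(dali_type):
    np_types = {
        types.INT32: np.int32,
        types.UINT32: np.uint32,
        types.INT64: np.int64,
        types.UINT64: np.uint64,
        types.FLOAT: np.float32,
        types.FLOAT64: np.float64,
    }
    return np_types[dali_type]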
Example #3
    def define_graph(self):
        inputs, bboxes, labels, polygons, vertices = fn.readers.coco(
                                            file_root=self.file_root,
                                            annotations_file=self.annotation_file,
                                            skip_empty=True,
                                            shard_id=self.shard_id,
                                            num_shards=self.num_gpus,
                                            ratio=True,
                                            ltrb=True,
                                            polygon_masks=True,
                                            random_shuffle=self.random_shuffle,
                                            shuffle_after_epoch=self.shuffle_after_epoch,
                                            name="Reader")

        input_shape = fn.slice(fn.cast(fn.peek_image_shape(inputs), dtype=types.INT32), 0, 2, axes=[0])
        h = fn.slice(input_shape, 0, 1, axes=[0], dtype=types.FLOAT)
        w = fn.slice(input_shape, 1, 1, axes=[0], dtype=types.FLOAT)
        short_side = math.min(w, h)
        scale = fn.random.uniform(range=[0.3, 1.])
        crop_side = fn.cast(math.ceil(scale * short_side), dtype=types.INT32)
        crop_shape = fn.cat(crop_side, crop_side)
        anchor_rel, shape_rel, bboxes, labels, bbox_indices = fn.random_bbox_crop(
                        bboxes,
                        labels,
                        input_shape=input_shape,
                        crop_shape=crop_shape,
                        shape_layout="HW",
                        thresholds=[0.],            # No minimum intersection-over-union, for demo purposes
                        allow_no_crop=False,        # No-crop is disallowed, for demo purposes
                        seed=-1,                    # seed=-1 picks a random seed, so results are not deterministic
                        bbox_layout="xyXY",         # left, top, right, bottom
                        output_bbox_indices=True,   # Output indices of the filtered bounding boxes
                        total_num_attempts=1024,
        )
        polygons, vertices = fn.segmentation.select_masks(
            bbox_indices, polygons, vertices
        )
        images = fn.decoders.image_slice(
            inputs, anchor_rel, shape_rel, normalized_anchor=False, normalized_shape=False, device='mixed'
        )
        images = fn.color_space_conversion(images, image_type=types.RGB, output_type=types.BGR)
        MT_1_vertices = fn.transforms.crop(
            to_start=(0.0, 0.0), to_end=fn.cat(w, h)
        )
        MT_2_vertices = fn.transforms.crop(
            from_start=anchor_rel, from_end=(anchor_rel + shape_rel),
            to_start=(0.0, 0.0), to_end=(1., 1.)
        )
        vertices = fn.coord_transform(fn.coord_transform(vertices, MT=MT_1_vertices), MT=MT_2_vertices)
        targets = fn.cat(bboxes, fn.reshape(vertices, shape=[-1, 10]), axis=1)

        interp_methods = [types.INTERP_LINEAR, types.INTERP_CUBIC, types.INTERP_LANCZOS3, types.INTERP_GAUSSIAN, types.INTERP_NN, types.INTERP_TRIANGULAR]
        interp_method = fn.random.uniform(values=[int(x) for x in interp_methods], dtype=types.INT32)
        interp_method = fn.reinterpret(interp_method, dtype=types.INTERP_TYPE)
        images = fn.resize(images, dtype=types.FLOAT, size=self.input_dim, interp_type=interp_method)

        labels = labels.gpu()
        targets = targets.gpu()
        return (images, targets, labels)
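# `define_graph` above is a method of a `Pipeline` subclass. A minimal
# skeleton of the surrounding class, assuming the attribute names the graph
# uses (the original class is not shown in the snippet):
from nvidia.dali.pipeline import Pipeline

class COCOPipeline(Pipeline):
    def __init__(self, batch_size, num_threads, device_id, file_root,
                 annotation_file, num_gpus, input_dim,
                 random_shuffle=True, shuffle_after_epoch=False):
        super().__init__(batch_size, num_threads, device_id)
        self.file_root = file_root
        self.annotation_file = annotation_file
        self.shard_id = device_id
        self.num_gpus = num_gpus
        self.input_dim = input_dim
        self.random_shuffle = random_shuffle
        self.shuffle_after_epoch = shuffle_after_epoch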
Example #4
def peek_image_shape_pipe(max_batch_size, input_data, device):
    pipe = Pipeline(batch_size=max_batch_size, num_threads=4, device_id=0)
    encoded = fn.external_source(source=input_data,
                                 cycle=False,
                                 device='cpu')
    shape = fn.peek_image_shape(encoded, device=device)
    pipe.set_outputs(shape)
    return pipe
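# Usage sketch: `input_data` feeds the `external_source` operator, so it is
# expected to be an iterable (or callable) producing batches of encoded image
# buffers. A hypothetical batch built from files on disk:
import numpy as np

def encoded_batch(paths):
    # one 1D uint8 buffer with the raw file bytes per sample
    return [np.fromfile(p, dtype=np.uint8) for p in paths]

# pipe = peek_image_shape_pipe(4, [encoded_batch(paths)], device='cpu')
# pipe.build()
# shapes, = pipe.run()   # one (H, W, C) triple per sample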
Example #5
def test_peek_image_shape_cpu():
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=None)
    input, _ = fn.readers.file(file_root=images_dir, shard_id=0, num_shards=1)
    shapes = fn.peek_image_shape(input)
    pipe.set_outputs(shapes)
    pipe.build()
    for _ in range(3):
        pipe.run()
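# Note: `device_id=None` builds a CPU-only pipeline, so this test runs without
# a GPU. `batch_size` and `images_dir` are module-level globals defined
# elsewhere in the test file; hypothetical placeholders:
#
# batch_size = 4
# images_dir = '/path/to/images'   # any directory containing image files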
Example #6
def setup_dali(
    image_file='/mnt/data/DATASETS/samples/images/image_110.jpg',
    image_dim=[800, 1600],
    batch_size=1,
    num_threads=4,
    device='mixed',
    device_id=0,
    output_dir='./out/',
):

    os.makedirs(os.path.dirname(output_dir), exist_ok=True)

    pipeline = dali.pipeline.Pipeline(batch_size=batch_size,
                                      num_threads=num_threads,
                                      device_id=device_id)

    with pipeline:
        data, _ = fn.file_reader(files=[image_file])
        # image preprocess
        images = fn.image_decoder(data, device=device)
        images = fn.resize(images,
                           size=image_dim,
                           mode="not_larger",
                           max_size=image_dim)
        images = fn.pad(images,
                        fill_value=0,
                        shape=[image_dim[0], image_dim[1], 1])
        images = fn.transpose(images, perm=[2, 0, 1])
        images = fn.cast(images, dtype=dali.types.FLOAT)
        images = images / 255.
        # input shape
        input_shape = np.float32((image_dim[0], image_dim[1], 1))
        # original shape
        shapes = fn.peek_image_shape(data)
        shapes = fn.cast(shapes, dtype=dali.types.FLOAT)
        # gather outputs
        out = [images, input_shape, shapes]
        pipeline.set_outputs(*out)

    pipeline.build()
    output = pipeline.run()
    img = output[0].at(0) if device == 'cpu' else output[0].as_cpu().at(0)

    img = img.transpose(1, 2, 0)  # HWC
    img = img[:, :, ::-1]  # BGR
    cv2.imwrite(os.path.join(output_dir, 'dali_image.jpg'), img)
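# Note: fn.peek_image_shape reads the shape from the encoded file's header
# without decoding, so `shapes` above carries the original (H, W, C) of the
# source image while `input_shape` is the fixed network input size. A sketch
# of inspecting the extra outputs returned by pipeline.run():
#
# images, net_input_shape, orig_shapes = output
# print(orig_shapes.at(0))   # e.g. [h, w, c] as float32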
Example #7
def setup_dali(
    input_name='DALI_INPUT_0',
    image_dim=[896, 1536],
    batch_size=1,
    num_threads=4,
    device='cpu',
    device_id=0,
    output_dir='./out/',
):

    pipeline = dali.pipeline.Pipeline(batch_size=batch_size,
                                      num_threads=num_threads,
                                      device_id=device_id)

    with pipeline:
        data = fn.external_source(name=input_name, device="cpu")
        # image preprocess
        images = fn.image_decoder(data, device=device)
        images = fn.resize(images,
                           size=image_dim,
                           mode="not_larger",
                           max_size=image_dim)
        images = fn.pad(images,
                        fill_value=0,
                        shape=[image_dim[0], image_dim[1], 1])
        images = fn.transpose(images, perm=[2, 0, 1])
        images = fn.cast(images, dtype=dali.types.FLOAT)
        images = images / 255.
        # input shape
        input_shape = np.float32((image_dim[0], image_dim[1], 1))
        # original shape
        shapes = fn.peek_image_shape(data)
        shapes = fn.cast(shapes, dtype=dali.types.FLOAT)
        # gather outputs
        out = [images, input_shape, shapes]
        pipeline.set_outputs(*out)

    os.makedirs(os.path.dirname(output_dir), exist_ok=True)
    pipeline.serialize(filename=os.path.join(output_dir, 'model.dali'))
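# The serialized 'model.dali' file is meant to be loaded by another process
# (for instance, the Triton DALI backend). A sketch of loading it back in
# Python, assuming a DALI version that provides `Pipeline.deserialize`:
loaded = dali.pipeline.Pipeline.deserialize(
    filename='./out/model.dali', batch_size=1, num_threads=4, device_id=0)
loaded.build()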
Example #8
def pipe():
    encoded, _ = fn.readers.file(files=[normal, palette])
    peeked_shapes = fn.peek_image_shape(encoded)
    decoded = fn.decoders.image(encoded, device='cpu')
    return decoded, peeked_shapes
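# A plausible check for the pipe above (the file list pairs a regular image
# with a palette PNG), assuming `pipe` is wrapped with `pipeline_def` as in
# Example #1: the shape peeked from the header should agree with the shape of
# the decoded image for both files:
#
# p.build()
# decoded, peeked = p.run()
# for i in range(2):
#     assert tuple(int(d) for d in peeked.at(i)) == decoded.at(i).shape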
def check_bbox_random_crop_adjust_polygons(file_root,
                                           annotations_file,
                                           batch_size=3,
                                           num_iters=4,
                                           num_threads=4,
                                           device_id=0,
                                           seed=1234):
    pipe = Pipeline(batch_size=batch_size,
                    num_threads=num_threads,
                    device_id=device_id,
                    seed=seed)
    with pipe:
        # Read data from COCO
        # ratio=True means both bboxes and masks coordinates will be
        # relative to the image dimensions (range [0.0, 1.0])
        inputs, in_bboxes, labels, in_polygons, in_vertices = \
            fn.readers.coco(
                file_root=file_root, annotations_file=annotations_file, shard_id=0, num_shards=1,
                ratio=True, ltrb=True, polygon_masks=True
            )

        # Generate a random crop. out_bboxes are adjusted to the crop window
        slice_anchor, slice_shape, out_bboxes, labels, bbox_indices = \
            fn.random_bbox_crop(
                in_bboxes, labels,
                aspect_ratio=[0.5, 2.0], thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
                scaling=[0.3, 1.0], bbox_layout='xyXY', output_bbox_indices=True
            )
        # Crop the image
        _ = fn.decoders.image_slice(inputs,
                                    slice_anchor,
                                    slice_shape,
                                    device='mixed',
                                    axis_names='WH')

        sel_polygons, sel_vertices = fn.segmentation.select_masks(
            bbox_indices, in_polygons, in_vertices)

        # Adjust masks coordinates to the coordinate space of the cropped image
        MT = fn.transforms.crop(from_start=slice_anchor,
                                from_end=(slice_anchor + slice_shape))
        out_vertices = fn.coord_transform(sel_vertices, MT=MT)

        # Converting to absolute coordinates (demo purposes)
        image_shape = fn.peek_image_shape(inputs, dtype=types.FLOAT)
        h = fn.slice(image_shape, 0, 1, axes=[0])
        w = fn.slice(image_shape, 1, 1, axes=[0])

        # Original bboxes
        bbox_x = fn.slice(in_bboxes, 0, 1, axes=[1])
        bbox_y = fn.slice(in_bboxes, 1, 1, axes=[1])
        bbox_X = fn.slice(in_bboxes, 2, 1, axes=[1])
        bbox_Y = fn.slice(in_bboxes, 3, 1, axes=[1])
        in_bboxes_abs = fn.cat(bbox_x * w,
                               bbox_y * h,
                               bbox_X * w,
                               bbox_Y * h,
                               axis=1)

        # Transform to convert relative coordinates to absolute
        scale_rel_to_abs = fn.transforms.scale(scale=fn.cat(w, h))

        # Selected vertices (relative coordinates)
        sel_vertices_abs = fn.coord_transform(out_vertices,
                                              MT=scale_rel_to_abs)

        # Output bboxes
        bbox2_x = fn.slice(out_bboxes, 0, 1, axes=[1])
        bbox2_y = fn.slice(out_bboxes, 1, 1, axes=[1])
        bbox2_X = fn.slice(out_bboxes, 2, 1, axes=[1])
        bbox2_Y = fn.slice(out_bboxes, 3, 1, axes=[1])
        out_bboxes_abs = fn.cat(bbox2_x * w,
                                bbox2_y * h,
                                bbox2_X * w,
                                bbox2_Y * h,
                                axis=1)

        # Output vertices (absolute coordinates)
        out_vertices_abs = fn.coord_transform(out_vertices,
                                              MT=scale_rel_to_abs)

        # Clamped coordinates
        out_vertices_clamped = math.clamp(out_vertices, 0.0, 1.0)
        out_vertices_clamped_abs = fn.coord_transform(out_vertices_clamped,
                                                      MT=scale_rel_to_abs)

    pipe.set_outputs(in_vertices, sel_vertices, sel_vertices_abs, out_vertices,
                     out_vertices_clamped, out_vertices_abs,
                     out_vertices_clamped_abs, in_bboxes, in_bboxes_abs,
                     out_bboxes, out_bboxes_abs, in_polygons, sel_polygons,
                     image_shape, slice_anchor, slice_shape, bbox_indices)
    pipe.build()
    # Enough iterations to see an example with more than one bounding box
    for i in range(num_iters):
        outs = pipe.run()
        for j in range(batch_size):
            (in_vertices, sel_vertices, sel_vertices_abs, out_vertices,
             out_vertices_clamped, out_vertices_abs, out_vertices_clamped_abs,
             in_bboxes, in_bboxes_abs, out_bboxes, out_bboxes_abs, in_polygons,
             sel_polygons, image_shape, slice_anchor, slice_shape,
             bbox_indices) = (outs[k].at(j) for k in range(len(outs)))

            # Checking that the output polygon descriptors are the ones associated with the
            # selected bounding boxes
            expected_polygons_list = []
            expected_vertices_list = []
            ver_count = 0
            for k in range(in_polygons.shape[0]):
                mask_id = in_polygons[k][0]
                in_ver_start_idx = in_polygons[k][1]
                in_ver_end_idx = in_polygons[k][2]
                pol_nver = in_ver_end_idx - in_ver_start_idx
                if mask_id in bbox_indices:
                    expected_polygons_list.append(
                        [mask_id, ver_count, ver_count + pol_nver])
                    for vert_idx in range(in_ver_start_idx, in_ver_end_idx):
                        expected_vertices_list.append(in_vertices[vert_idx])
                    ver_count = ver_count + pol_nver
            expected_sel_polygons = np.array(expected_polygons_list)
            np.testing.assert_equal(expected_sel_polygons, sel_polygons)

            # Checking that the selected vertices correspond to the selected masks
            expected_sel_vertices = np.array(expected_vertices_list)
            np.testing.assert_equal(expected_sel_vertices, sel_vertices)

            # Checking that the vertices are correctly mapped to the cropping window
            expected_out_vertices = np.copy(expected_sel_vertices)
            crop_x, crop_y = slice_anchor
            crop_w, crop_h = slice_shape
            for v in range(expected_out_vertices.shape[0]):
                expected_out_vertices[v, 0] = (expected_out_vertices[v, 0] -
                                               crop_x) / crop_w
                expected_out_vertices[v, 1] = (expected_out_vertices[v, 1] -
                                               crop_y) / crop_h
            np.testing.assert_allclose(expected_out_vertices,
                                       out_vertices,
                                       rtol=1e-4)

            # Checking the conversion to absolute coordinates
            h, w, _ = image_shape
            wh = np.array([w, h])
            whwh = np.array([w, h, w, h])
            expected_out_vertices_abs = expected_out_vertices * wh
            np.testing.assert_allclose(expected_out_vertices_abs,
                                       out_vertices_abs,
                                       rtol=1e-4)

            # Checking clamping of the relative coordinates
            expected_out_vertices_clamped = np.clip(expected_out_vertices,
                                                    a_min=0.0,
                                                    a_max=1.0)
            np.testing.assert_allclose(expected_out_vertices_clamped,
                                       out_vertices_clamped,
                                       rtol=1e-4)

            # Checking clamping of the absolute coordinates
            expected_out_vertices_clamped_abs = np.clip(
                expected_out_vertices_abs, 0, wh)
            np.testing.assert_allclose(expected_out_vertices_clamped_abs,
                                       out_vertices_clamped_abs,
                                       rtol=1e-4)

            # Checking scaling of the bounding boxes
            expected_in_bboxes_abs = in_bboxes * whwh
            np.testing.assert_allclose(expected_in_bboxes_abs,
                                       in_bboxes_abs,
                                       rtol=1e-4)

            # Check box selection and mapping to the cropping window
            expected_out_bboxes = np.copy(in_bboxes[bbox_indices, :])
            for k in range(expected_out_bboxes.shape[0]):
                expected_out_bboxes[k, 0] = (expected_out_bboxes[k, 0] -
                                             crop_x) / crop_w
                expected_out_bboxes[k, 1] = (expected_out_bboxes[k, 1] -
                                             crop_y) / crop_h
                expected_out_bboxes[k, 2] = (expected_out_bboxes[k, 2] -
                                             crop_x) / crop_w
                expected_out_bboxes[k, 3] = (expected_out_bboxes[k, 3] -
                                             crop_y) / crop_h
            expected_out_bboxes = np.clip(expected_out_bboxes,
                                          a_min=0.0,
                                          a_max=1.0)
            np.testing.assert_allclose(expected_out_bboxes,
                                       out_bboxes,
                                       rtol=1e-4)

            expected_out_bboxes_abs = expected_out_bboxes * whwh
            np.testing.assert_allclose(expected_out_bboxes_abs,
                                       out_bboxes_abs,
                                       rtol=1e-4)
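# Hypothetical invocation, assuming a COCO-layout dataset (an image directory
# plus a JSON annotations file; the paths below are placeholders):
#
# check_bbox_random_crop_adjust_polygons('/data/coco/images',
#                                        '/data/coco/annotations/instances.json')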