Example #1
    def define_graph(self):
        inputs, bboxes, labels, polygons, vertices = fn.readers.coco(
                                            file_root=self.file_root,
                                            annotations_file=self.annotation_file,
                                            skip_empty=True,
                                            shard_id=self.shard_id,
                                            num_shards=self.num_gpus,
                                            ratio=True,
                                            ltrb=True,
                                            polygon_masks=True,
                                            random_shuffle=self.random_shuffle,
                                            shuffle_after_epoch=self.shuffle_after_epoch,
                                            name="Reader")

        input_shape = fn.slice(fn.cast(fn.peek_image_shape(inputs), dtype=types.INT32), 0, 2, axes=[0])
        h = fn.slice(input_shape, 0, 1, axes=[0], dtype=types.FLOAT)
        w = fn.slice(input_shape, 1, 1, axes=[0], dtype=types.FLOAT)
        short_side = math.min(w, h)
        scale = fn.random.uniform(range=[0.3, 1.0])
        crop_side = fn.cast(math.ceil(scale * short_side), dtype=types.INT32)
        crop_shape = fn.cat(crop_side, crop_side)
        anchor_rel, shape_rel, bboxes, labels, bbox_indices = fn.random_bbox_crop(
                        bboxes,
                        labels,
                        input_shape=input_shape,
                        crop_shape=crop_shape,
                        shape_layout="HW",
                        thresholds=[0.],            # No minimum intersection-over-union, for demo purposes
                        allow_no_crop=False,        # No-crop is disallowed, for demo purposes
                        seed=-1,                    # seed=-1 selects a random seed (non-deterministic)
                        bbox_layout="xyXY",         # left, top, right, bottom
                        output_bbox_indices=True,   # Output indices of the filtered bounding boxes
                        total_num_attempts=1024,
        )
        polygons, vertices = fn.segmentation.select_masks(
            bbox_indices, polygons, vertices
        )
        images = fn.decoders.image_slice(
            inputs, anchor_rel, shape_rel, normalized_anchor=False, normalized_shape=False, device='mixed'
        )
        images = fn.color_space_conversion(images, image_type=types.RGB, output_type=types.BGR)
        # The COCO reader (ratio=True) outputs vertices in relative coordinates;
        # MT_1 maps them to absolute pixel coordinates.
        MT_1_vertices = fn.transforms.crop(
            to_start=(0.0, 0.0), to_end=fn.cat(w, h)
        )
        # Despite the "_rel" suffix, anchor_rel/shape_rel are absolute pixel values
        # here (input_shape and crop_shape were given in pixels, hence
        # normalized_anchor/normalized_shape=False above). MT_2 maps the crop
        # window to the [0, 1] range of the cropped image.
        MT_2_vertices = fn.transforms.crop(
            from_start=anchor_rel, from_end=(anchor_rel + shape_rel),
            to_start=(0.0, 0.0), to_end=(1.0, 1.0)
        )
        vertices = fn.coord_transform(fn.coord_transform(vertices, MT=MT_1_vertices), MT=MT_2_vertices)
        targets = fn.cat(bboxes, fn.reshape(vertices, shape=[-1, 10]), axis=1)

        # Pick an interpolation method at random for each sample
        interp_methods = [types.INTERP_LINEAR, types.INTERP_CUBIC, types.INTERP_LANCZOS3,
                          types.INTERP_GAUSSIAN, types.INTERP_NN, types.INTERP_TRIANGULAR]
        interp_method = fn.random.uniform(values=[int(x) for x in interp_methods], dtype=types.INT32)
        interp_method = fn.reinterpret(interp_method, dtype=types.INTERP_TYPE)
        images = fn.resize(images, dtype=types.FLOAT, size=self.input_dim, interp_type=interp_method)

        labels = labels.gpu()
        targets = targets.gpu()
        return (images, targets, labels)
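
For context, define_graph above belongs to a class-based DALI pipeline. A minimal sketch of how such a pipeline might be wired up and run is shown below; the class name, constructor arguments, and dataset paths are hypothetical, and only the fields that define_graph actually reads are filled in:

import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.math as math
import nvidia.dali.types as types

class COCOTrainPipeline(dali.pipeline.Pipeline):  # hypothetical name
    def __init__(self, file_root, annotation_file, batch_size,
                 num_gpus=1, shard_id=0, input_dim=(300, 300), device_id=0):
        super().__init__(batch_size, num_threads=4, device_id=device_id, seed=1234)
        self.file_root = file_root
        self.annotation_file = annotation_file
        self.num_gpus = num_gpus
        self.shard_id = shard_id
        self.input_dim = list(input_dim)
        self.random_shuffle = True
        self.shuffle_after_epoch = False

    # define_graph exactly as in Example #1 above
    ...

pipe = COCOTrainPipeline("/data/coco/images", "/data/coco/annotations.json", batch_size=8)
pipe.build()
images, targets, labels = pipe.run()  # one batch of (image, target, label) outputs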
Example #2
def _test_empty_input(device):
    pipe = dali.pipeline.Pipeline(batch_size=2,
                                  num_threads=4,
                                  device_id=0,
                                  seed=1234)
    with pipe:
        X = fn.external_source(source=[[np.zeros([0, 3]),
                                        np.zeros([0, 3])]],
                               device="cpu",
                               layout="AB")
        Y = fn.coord_transform(X, M=(1, 2, 3, 4, 5, 6), T=(1, 2))
        pipe.set_outputs(Y)
    pipe.build()
    o = pipe.run()
    assert o[0].layout() == "AB"
    assert len(o[0]) == 2
    for i in range(len(o[0])):
        assert o[0].at(i).size == 0
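
As a side note on the parameters used above: with a flat M=(1, 2, 3, 4, 5, 6) and 2D output, fn.coord_transform treats M as a row-major 2x3 matrix and maps each input point x to M @ x + T (Example #4 below checks this exact reference formula). A standalone NumPy sketch of the same mapping, on non-empty input for illustration:

import numpy as np

M = np.array([[1, 2, 3],
              [4, 5, 6]], dtype=np.float32)  # row-major 2x3, as in M=(1, 2, 3, 4, 5, 6)
T = np.array([1, 2], dtype=np.float32)

points = np.array([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 1.0]], dtype=np.float32)  # shape (N, 3), like layout "AB"
mapped = points @ M.T + T  # each output row is M @ x + T
print(mapped)              # [[ 2.  6.]
                           #  [ 6. 13.]]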
Example #3
def check_bbox_random_crop_adjust_polygons(file_root,
                                           annotations_file,
                                           batch_size=3,
                                           num_iters=4,
                                           num_threads=4,
                                           device_id=0,
                                           seed=1234):
    pipe = Pipeline(batch_size=batch_size,
                    num_threads=num_threads,
                    device_id=device_id,
                    seed=seed)
    with pipe:
        # Read data from COCO
        # ratio=True means both bbox and mask coordinates will be
        # relative to the image dimensions (range [0.0, 1.0])
        inputs, in_bboxes, labels, in_polygons, in_vertices = \
            fn.readers.coco(
                file_root=file_root, annotations_file=annotations_file, shard_id=0, num_shards=1,
                ratio=True, ltrb=True, polygon_masks=True
            )

        # Generate a random crop. out_bboxes are adjusted to the crop window
        slice_anchor, slice_shape, out_bboxes, labels, bbox_indices = \
            fn.random_bbox_crop(
                in_bboxes, labels,
                aspect_ratio=[0.5, 2.0], thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
                scaling=[0.3, 1.0], bbox_layout='xyXY', output_bbox_indices=True
            )
        # Crop the image (the decoded crop itself is not checked in this test)
        _ = fn.decoders.image_slice(inputs,
                                    slice_anchor,
                                    slice_shape,
                                    device='mixed',
                                    axis_names='WH')

        sel_polygons, sel_vertices = fn.segmentation.select_masks(
            bbox_indices, in_polygons, in_vertices)

        # Adjust mask coordinates to the coordinate space of the cropped image
        MT = fn.transforms.crop(from_start=slice_anchor,
                                from_end=(slice_anchor + slice_shape))
        out_vertices = fn.coord_transform(sel_vertices, MT=MT)

        # Convert to absolute coordinates (for demo purposes)
        image_shape = fn.peek_image_shape(inputs, dtype=types.FLOAT)
        h = fn.slice(image_shape, 0, 1, axes=[0])
        w = fn.slice(image_shape, 1, 1, axes=[0])

        # Original bboxes
        bbox_x = fn.slice(in_bboxes, 0, 1, axes=[1])
        bbox_y = fn.slice(in_bboxes, 1, 1, axes=[1])
        bbox_X = fn.slice(in_bboxes, 2, 1, axes=[1])
        bbox_Y = fn.slice(in_bboxes, 3, 1, axes=[1])
        in_bboxes_abs = fn.cat(bbox_x * w,
                               bbox_y * h,
                               bbox_X * w,
                               bbox_Y * h,
                               axis=1)

        # Transform to convert relative coordinates to absolute
        scale_rel_to_abs = fn.transforms.scale(scale=fn.cat(w, h))

        # Selected vertices converted to absolute coordinates
        sel_vertices_abs = fn.coord_transform(sel_vertices,
                                              MT=scale_rel_to_abs)

        # Output bboxes
        bbox2_x = fn.slice(out_bboxes, 0, 1, axes=[1])
        bbox2_y = fn.slice(out_bboxes, 1, 1, axes=[1])
        bbox2_X = fn.slice(out_bboxes, 2, 1, axes=[1])
        bbox2_Y = fn.slice(out_bboxes, 3, 1, axes=[1])
        out_bboxes_abs = fn.cat(bbox2_x * w,
                                bbox2_y * h,
                                bbox2_X * w,
                                bbox2_Y * h,
                                axis=1)

        # Output vertices (absolute coordinates)
        out_vertices_abs = fn.coord_transform(out_vertices,
                                              MT=scale_rel_to_abs)

        # Clamped coordinates
        out_vertices_clamped = math.clamp(out_vertices, 0.0, 1.0)
        out_vertices_clamped_abs = fn.coord_transform(out_vertices_clamped,
                                                      MT=scale_rel_to_abs)

    pipe.set_outputs(in_vertices, sel_vertices, sel_vertices_abs, out_vertices,
                     out_vertices_clamped, out_vertices_abs,
                     out_vertices_clamped_abs, in_bboxes, in_bboxes_abs,
                     out_bboxes, out_bboxes_abs, in_polygons, sel_polygons,
                     image_shape, slice_anchor, slice_shape, bbox_indices)
    pipe.build()
    # Enough iterations to see an example with more than one bounding box
    for i in range(num_iters):
        outs = pipe.run()
        for j in range(batch_size):
            (in_vertices, sel_vertices, sel_vertices_abs, out_vertices,
             out_vertices_clamped, out_vertices_abs, out_vertices_clamped_abs,
             in_bboxes, in_bboxes_abs, out_bboxes, out_bboxes_abs, in_polygons,
             sel_polygons, image_shape, slice_anchor, slice_shape,
             bbox_indices) = (outs[k].at(j) for k in range(len(outs)))

            # Checking that the output polygon descriptors are the ones associated with the
            # selected bounding boxes
            expected_polygons_list = []
            expected_vertices_list = []
            ver_count = 0
            for k in range(in_polygons.shape[0]):
                mask_id = in_polygons[k][0]
                in_ver_start_idx = in_polygons[k][1]
                in_ver_end_idx = in_polygons[k][2]
                pol_nver = in_ver_end_idx - in_ver_start_idx
                if mask_id in bbox_indices:
                    expected_polygons_list.append(
                        [mask_id, ver_count, ver_count + pol_nver])
                    for vi in range(in_ver_start_idx, in_ver_end_idx):
                        expected_vertices_list.append(in_vertices[vi])
                    ver_count = ver_count + pol_nver
            expected_sel_polygons = np.array(expected_polygons_list)
            np.testing.assert_equal(expected_sel_polygons, sel_polygons)

            # Checking the selected vertices correspond to the selected masks
            expected_sel_vertices = np.array(expected_vertices_list)
            np.testing.assert_equal(expected_sel_vertices, sel_vertices)

            # Check that the vertices are correctly mapped to the cropping window
            expected_out_vertices = np.copy(expected_sel_vertices)
            crop_x, crop_y = slice_anchor
            crop_w, crop_h = slice_shape
            for v in range(expected_out_vertices.shape[0]):
                expected_out_vertices[v, 0] = (expected_out_vertices[v, 0] -
                                               crop_x) / crop_w
                expected_out_vertices[v, 1] = (expected_out_vertices[v, 1] -
                                               crop_y) / crop_h
            np.testing.assert_allclose(expected_out_vertices,
                                       out_vertices,
                                       rtol=1e-4)

            # Checking the conversion to absolute coordinates
            h, w, _ = image_shape
            wh = np.array([w, h])
            whwh = np.array([w, h, w, h])
            expected_out_vertices_abs = expected_out_vertices * wh
            np.testing.assert_allclose(expected_out_vertices_abs,
                                       out_vertices_abs,
                                       rtol=1e-4)

            # Checking clamping of the relative coordinates
            expected_out_vertices_clamped = np.clip(expected_out_vertices,
                                                    a_min=0.0,
                                                    a_max=1.0)
            np.testing.assert_allclose(expected_out_vertices_clamped,
                                       out_vertices_clamped,
                                       rtol=1e-4)

            # Checking clamping of the absolute coordinates
            expected_out_vertices_clamped_abs = np.clip(
                expected_out_vertices_abs, 0, wh)
            np.testing.assert_allclose(expected_out_vertices_clamped_abs,
                                       out_vertices_clamped_abs,
                                       rtol=1e-4)

            # Checking scaling of the bounding boxes
            expected_in_bboxes_abs = in_bboxes * whwh
            np.testing.assert_allclose(expected_in_bboxes_abs,
                                       in_bboxes_abs,
                                       rtol=1e-4)

            # Check box selection and mapping to the cropping window
            expected_out_bboxes = np.copy(in_bboxes[bbox_indices, :])
            for k in range(expected_out_bboxes.shape[0]):
                expected_out_bboxes[k, 0] = (expected_out_bboxes[k, 0] -
                                             crop_x) / crop_w
                expected_out_bboxes[k, 1] = (expected_out_bboxes[k, 1] -
                                             crop_y) / crop_h
                expected_out_bboxes[k, 2] = (expected_out_bboxes[k, 2] -
                                             crop_x) / crop_w
                expected_out_bboxes[k, 3] = (expected_out_bboxes[k, 3] -
                                             crop_y) / crop_h
            expected_out_bboxes = np.clip(expected_out_bboxes,
                                          a_min=0.0,
                                          a_max=1.0)
            np.testing.assert_allclose(expected_out_bboxes,
                                       out_bboxes,
                                       rtol=1e-4)

            expected_out_bboxes_abs = expected_out_bboxes * whwh
            np.testing.assert_allclose(expected_out_bboxes_abs,
                                       out_bboxes_abs,
                                       rtol=1e-4)
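
For intuition, the fn.transforms.crop matrix built in the pipeline above (with the default to_start=(0, 0) and to_end=(1, 1)) maps a point v to (v - from_start) / (from_end - from_start), which is exactly what the reference loops in the checks re-derive. A standalone NumPy sketch of that affine matrix, using the same fused [M | T] layout that fn.coord_transform accepts as MT (crop_transform is a hypothetical helper, not DALI API):

import numpy as np

def crop_transform(from_start, from_end, to_start=(0.0, 0.0), to_end=(1.0, 1.0)):
    # Build the 2x3 affine matrix that maps [from_start, from_end] onto
    # [to_start, to_end], mirroring what fn.transforms.crop computes.
    from_start, from_end = np.asarray(from_start), np.asarray(from_end)
    to_start, to_end = np.asarray(to_start), np.asarray(to_end)
    scale = (to_end - to_start) / (from_end - from_start)
    M = np.diag(scale)
    T = to_start - from_start * scale
    return np.hstack([M, T[:, None]])  # fused [M | T], shape 2x3

anchor = np.array([0.2, 0.1])
shape = np.array([0.5, 0.6])
MT = crop_transform(anchor, anchor + shape)
v = np.array([0.45, 0.4])          # a vertex in image-relative coordinates
print(MT[:, :2] @ v + MT[:, 2])    # [0.5 0.5] -> the crop-window center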
Example #4
def _run_test(device, batch_size, out_dim, in_dim, in_dtype, out_dtype, M_kind,
              T_kind):
    pipe = dali.pipeline.Pipeline(batch_size=batch_size,
                                  num_threads=4,
                                  device_id=0,
                                  seed=1234)
    with pipe:
        X = fn.external_source(source=get_data_source(batch_size, in_dim,
                                                      in_dtype),
                               device=device,
                               layout="NX")
        M = None
        T = None
        MT = None
        if T_kind == "fused":
            MT = make_param(M_kind, [out_dim, in_dim + 1])
        else:
            M = make_param(M_kind, [out_dim, in_dim])
            T = make_param(T_kind, [out_dim])

        Y = fn.coord_transform(
            X,
            MT=MT.flatten().tolist() if isinstance(MT, np.ndarray) else MT,
            M=M.flatten().tolist() if isinstance(M, np.ndarray) else M,
            T=T.flatten().tolist() if isinstance(T, np.ndarray) else T,
            dtype=dali_type(out_dtype))
        if M is None:
            M = 1
        if T is None:
            T = 0
        if MT is None:
            MT = 0

        M, T, MT = (x if isinstance(x, dali.data_node.DataNode) else
                    dali.types.Constant(x, dtype=dali.types.FLOAT)
                    for x in (M, T, MT))

        pipe.set_outputs(X, Y, M, T, MT)

    pipe.build()
    for _ in range(3):
        outputs = pipe.run()
        outputs = [x.as_cpu() if hasattr(x, "as_cpu") else x for x in outputs]
        ref = []
        scale = 1
        for idx in range(batch_size):
            X = outputs[0].at(idx)
            if T_kind == "fused":
                MT = outputs[4].at(idx)
                if MT.size == 1:
                    M = MT
                    T = 0
                else:
                    M = MT[:, :-1]
                    T = MT[:, -1]
            else:
                M = outputs[2].at(idx)
                T = outputs[3].at(idx)

            if M.size == 1:
                Y = X.astype(np.float32) * M + T
            else:
                Y = np.matmul(X.astype(np.float32), M.transpose()) + T

            if np.issubdtype(out_dtype, np.integer):
                info = np.iinfo(out_dtype)
                Y = Y.clip(info.min, info.max)

            ref.append(Y)
            if Y.size > 0:
                scale = max(scale,
                            np.max(np.abs(Y)) - np.min(np.abs(Y)))
        avg = 1e-6 * scale
        eps = 1e-6 * scale
        if out_dtype != np.float32:  # headroom for rounding
            avg += 0.33
            eps += 0.5
        check_batch(outputs[1],
                    ref,
                    batch_size,
                    eps,
                    eps,
                    expected_layout="NX",
                    compare_layouts=True)
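
Example #4 relies on a few helpers defined elsewhere in DALI's test suite (check_batch and dali_type come from its test_utils module). A rough sketch of what make_param and get_data_source might look like follows; the bodies are assumptions for illustration, not the originals:

import numpy as np
import nvidia.dali.fn as fn

def make_param(kind, shape):
    # Hypothetical reconstruction: return the transform parameter as a
    # constant array, a scalar, a per-sample DALI data node, or None.
    if kind == "const":
        return np.random.uniform(-1, 1, size=shape).astype(np.float32)
    if kind == "scalar":
        return np.float32(np.random.uniform(-1, 1))
    if kind == "input":
        return fn.external_source(
            lambda: np.random.uniform(-1, 1, size=shape).astype(np.float32),
            batch=False)
    return None  # kind == "none": fall back to the operator defaults

def get_data_source(batch_size, in_dim, in_dtype):
    # Hypothetical reconstruction: an endless source of batches, each sample
    # holding a random number of in_dim-dimensional points (layout "NX").
    def gen():
        rng = np.random.default_rng(4321)
        while True:
            yield [rng.uniform(0, 100, size=(rng.integers(1, 10), in_dim))
                       .astype(in_dtype) for _ in range(batch_size)]
    return gen()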