Code Example #1
import nvidia.dali.fn as fn
import nvidia.dali.math as dmath
from nvidia.dali import pipeline_def


# pipeline_def is assumed here so that this graph-building function can be
# instantiated as a Pipeline.
@pipeline_def
def pipeline_arithm_ops_cpu(source):
    data = fn.external_source(source=source, layout="HWC")
    processed = (data * 2,
                 data + 2,
                 data - 2,
                 data / 2,
                 data // 2,
                 data ** 2,
                 data == 2,
                 data != 2,
                 data < 2,
                 data <= 2,
                 data > 2,
                 data >= 2,
                 data & 2,
                 data | 2,
                 data ^ 2,
                 dmath.abs(data),
                 dmath.fabs(data),
                 dmath.floor(data),
                 dmath.ceil(data),
                 dmath.pow(data, 2),
                 dmath.fpow(data, 1.5),
                 dmath.min(data, 2),
                 dmath.max(data, 50),
                 dmath.clamp(data, 10, 50),
                 dmath.sqrt(data),
                 dmath.rsqrt(data),
                 dmath.cbrt(data),
                 dmath.exp(data),
                 dmath.log(data),
                 dmath.log2(data),
                 dmath.log10(data),
                 dmath.sin(data),
                 dmath.cos(data),
                 dmath.tan(data),
                 dmath.asin(data),
                 dmath.acos(data),
                 dmath.atan(data),
                 dmath.atan2(data, 3),
                 dmath.sinh(data),
                 dmath.cosh(data),
                 dmath.tanh(data),
                 dmath.asinh(data),
                 dmath.acosh(data),
                 dmath.atanh(data))
    return processed
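
A minimal driver for this pipeline might look as follows; get_data is a
placeholder source (an assumption, not part of the original snippet):

import numpy as np

def get_data():
    # Placeholder source: one batch of random HWC uint8 images.
    return [np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
            for _ in range(8)]

pipe = pipeline_arithm_ops_cpu(get_data, batch_size=8, num_threads=4,
                               device_id=None)
pipe.build()
outputs = pipe.run()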
Code Example #2
File: test_dali_cpu_only.py  Project: hannahaih/DALI
import nvidia.dali.fn as fn
import nvidia.dali.math as dmath
from nvidia.dali.pipeline import Pipeline


# batch_size and get_data are module-level definitions in the original test
# file; device_id=None makes this a CPU-only pipeline.
def test_arithm_ops_cpu():
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=None)
    data = fn.external_source(source=get_data, layout="HWC")
    processed = [
        data * 2, data + 2, data - 2, data / 2, data // 2, data**2, data == 2,
        data != 2, data < 2, data <= 2, data > 2, data >= 2, data & 2,
        data | 2, data ^ 2,
        dmath.abs(data),
        dmath.fabs(data),
        dmath.floor(data),
        dmath.ceil(data),
        dmath.pow(data, 2),
        dmath.fpow(data, 1.5),
        dmath.min(data, 2),
        dmath.max(data, 50),
        dmath.clamp(data, 10, 50),
        dmath.sqrt(data),
        dmath.rsqrt(data),
        dmath.cbrt(data),
        dmath.exp(data),
        dmath.log(data),
        dmath.log2(data),
        dmath.log10(data),
        dmath.sin(data),
        dmath.cos(data),
        dmath.tan(data),
        dmath.asin(data),
        dmath.acos(data),
        dmath.atan(data),
        dmath.atan2(data, 3),
        dmath.sinh(data),
        dmath.cosh(data),
        dmath.tanh(data),
        dmath.asinh(data),
        dmath.acosh(data),
        dmath.atanh(data)
    ]
    pipe.set_outputs(*processed)
    pipe.build()
    for _ in range(3):
        pipe.run()
Code Example #3
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.math as math
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline


def check_bbox_random_crop_adjust_polygons(file_root,
                                           annotations_file,
                                           batch_size=3,
                                           num_iters=4,
                                           num_threads=4,
                                           device_id=0,
                                           seed=1234):
    pipe = Pipeline(batch_size=batch_size,
                    num_threads=num_threads,
                    device_id=device_id,
                    seed=seed)
    with pipe:
        # Read data from COCO
        # ratio=True means both bboxes and masks coordinates will be
        # relative to the image dimensions (range [0.0, 1.0])
        inputs, in_bboxes, labels, in_polygons, in_vertices = \
            fn.readers.coco(
                file_root=file_root, annotations_file=annotations_file, shard_id=0, num_shards=1,
                ratio=True, ltrb=True, polygon_masks=True
            )

        # Generate a random crop. out_bboxes are adjusted to the crop window
        slice_anchor, slice_shape, out_bboxes, labels, bbox_indices = \
            fn.random_bbox_crop(
                in_bboxes, labels,
                aspect_ratio=[0.5, 2.0], thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
                scaling=[0.3, 1.0], bbox_layout='xyXY', output_bbox_indices=True
            )
        # Crop the image
        _ = fn.decoders.image_slice(inputs,
                                    slice_anchor,
                                    slice_shape,
                                    device='mixed',
                                    axis_names='WH')

        sel_polygons, sel_vertices = fn.segmentation.select_masks(
            bbox_indices, in_polygons, in_vertices)

        # Adjust mask coordinates to the coordinate space of the cropped image
        MT = fn.transforms.crop(from_start=slice_anchor,
                                from_end=(slice_anchor + slice_shape))
        out_vertices = fn.coord_transform(sel_vertices, MT=MT)

        # Converting to absolute coordinates (demo purposes)
        image_shape = fn.peek_image_shape(inputs, dtype=types.FLOAT)
        h = fn.slice(image_shape, 0, 1, axes=[0])
        w = fn.slice(image_shape, 1, 1, axes=[0])

        # Original bboxes
        bbox_x = fn.slice(in_bboxes, 0, 1, axes=[1])
        bbox_y = fn.slice(in_bboxes, 1, 1, axes=[1])
        bbox_X = fn.slice(in_bboxes, 2, 1, axes=[1])
        bbox_Y = fn.slice(in_bboxes, 3, 1, axes=[1])
        in_bboxes_abs = fn.cat(bbox_x * w,
                               bbox_y * h,
                               bbox_X * w,
                               bbox_Y * h,
                               axis=1)

        # Transform to convert relative coordinates to absolute
        scale_rel_to_abs = fn.transforms.scale(scale=fn.cat(w, h))

        # Selected vertices, converted to absolute coordinates
        sel_vertices_abs = fn.coord_transform(sel_vertices,
                                              MT=scale_rel_to_abs)

        # Output bboxes
        bbox2_x = fn.slice(out_bboxes, 0, 1, axes=[1])
        bbox2_y = fn.slice(out_bboxes, 1, 1, axes=[1])
        bbox2_X = fn.slice(out_bboxes, 2, 1, axes=[1])
        bbox2_Y = fn.slice(out_bboxes, 3, 1, axes=[1])
        out_bboxes_abs = fn.cat(bbox2_x * w,
                                bbox2_y * h,
                                bbox2_X * w,
                                bbox2_Y * h,
                                axis=1)

        # Output vertices (absolute coordinates)
        out_vertices_abs = fn.coord_transform(out_vertices,
                                              MT=scale_rel_to_abs)

        # Clamped coordinates
        out_vertices_clamped = math.clamp(out_vertices, 0.0, 1.0)
        out_vertices_clamped_abs = fn.coord_transform(out_vertices_clamped,
                                                      MT=scale_rel_to_abs)

    pipe.set_outputs(in_vertices, sel_vertices, sel_vertices_abs, out_vertices,
                     out_vertices_clamped, out_vertices_abs,
                     out_vertices_clamped_abs, in_bboxes, in_bboxes_abs,
                     out_bboxes, out_bboxes_abs, in_polygons, sel_polygons,
                     image_shape, slice_anchor, slice_shape, bbox_indices)
    pipe.build()
    # Enough iterations to see an example with more than one bounding box
    for i in range(num_iters):
        outs = pipe.run()
        for j in range(batch_size):
            (in_vertices, sel_vertices, sel_vertices_abs, out_vertices,
             out_vertices_clamped, out_vertices_abs, out_vertices_clamped_abs,
             in_bboxes, in_bboxes_abs, out_bboxes, out_bboxes_abs, in_polygons,
             sel_polygons, image_shape, slice_anchor, slice_shape,
             bbox_indices) = (outs[k].at(j) for k in range(len(outs)))

            # Checking that the output polygon descriptors are the ones associated with the
            # selected bounding boxes
            expected_polygons_list = []
            expected_vertices_list = []
            ver_count = 0
            for k in range(in_polygons.shape[0]):
                mask_id = in_polygons[k][0]
                in_ver_start_idx = in_polygons[k][1]
                in_ver_end_idx = in_polygons[k][2]
                pol_nver = in_ver_end_idx - in_ver_start_idx
                if mask_id in bbox_indices:
                    expected_polygons_list.append(
                        [mask_id, ver_count, ver_count + pol_nver])
                    for vi in range(in_ver_start_idx, in_ver_end_idx):
                        expected_vertices_list.append(in_vertices[vi])
                    ver_count = ver_count + pol_nver
            expected_sel_polygons = np.array(expected_polygons_list)
            np.testing.assert_equal(expected_sel_polygons, sel_polygons)

            # Checking the selected vertices correspond to the selected masks
            expected_sel_vertices = np.array(expected_vertices_list)
            np.testing.assert_equal(expected_sel_vertices, sel_vertices)

            # Check that the vertices are correctly mapped to the cropping window
            expected_out_vertices = np.copy(expected_sel_vertices)
            crop_x, crop_y = slice_anchor
            crop_w, crop_h = slice_shape
            for v in range(expected_out_vertices.shape[0]):
                expected_out_vertices[v, 0] = (expected_out_vertices[v, 0] -
                                               crop_x) / crop_w
                expected_out_vertices[v, 1] = (expected_out_vertices[v, 1] -
                                               crop_y) / crop_h
            np.testing.assert_allclose(expected_out_vertices,
                                       out_vertices,
                                       rtol=1e-4)

            # Checking the conversion to absolute coordinates
            h, w, _ = image_shape
            wh = np.array([w, h])
            whwh = np.array([w, h, w, h])
            expected_out_vertices_abs = expected_out_vertices * wh
            np.testing.assert_allclose(expected_out_vertices_abs,
                                       out_vertices_abs,
                                       rtol=1e-4)

            # Checking clamping of the relative coordinates
            expected_out_vertices_clamped = np.clip(expected_out_vertices,
                                                    a_min=0.0,
                                                    a_max=1.0)
            np.testing.assert_allclose(expected_out_vertices_clamped,
                                       out_vertices_clamped,
                                       rtol=1e-4)

            # Checking clamping of the absolute coordinates
            expected_out_vertices_clamped_abs = np.clip(
                expected_out_vertices_abs, 0, wh)
            np.testing.assert_allclose(expected_out_vertices_clamped_abs,
                                       out_vertices_clamped_abs,
                                       rtol=1e-4)

            # Checking scaling of the bounding boxes
            expected_in_bboxes_abs = in_bboxes * whwh
            np.testing.assert_allclose(expected_in_bboxes_abs,
                                       in_bboxes_abs,
                                       rtol=1e-4)

            # Check box selection and mapping to the cropping window
            expected_out_bboxes = np.copy(in_bboxes[bbox_indices, :])
            for k in range(expected_out_bboxes.shape[0]):
                expected_out_bboxes[k, 0] = (expected_out_bboxes[k, 0] -
                                             crop_x) / crop_w
                expected_out_bboxes[k, 1] = (expected_out_bboxes[k, 1] -
                                             crop_y) / crop_h
                expected_out_bboxes[k, 2] = (expected_out_bboxes[k, 2] -
                                             crop_x) / crop_w
                expected_out_bboxes[k, 3] = (expected_out_bboxes[k, 3] -
                                             crop_y) / crop_h
            expected_out_bboxes = np.clip(expected_out_bboxes,
                                          a_min=0.0,
                                          a_max=1.0)
            np.testing.assert_allclose(expected_out_bboxes,
                                       out_bboxes,
                                       rtol=1e-4)

            expected_out_bboxes_abs = expected_out_bboxes * whwh
            np.testing.assert_allclose(expected_out_bboxes_abs,
                                       out_bboxes_abs,
                                       rtol=1e-4)
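
The check above might be invoked as follows; the COCO paths are placeholders
(assumptions for illustration, not taken from the original):

# Hypothetical dataset locations; substitute a real COCO-layout dataset.
file_root = "/data/coco/train2017"
annotations_file = "/data/coco/annotations/instances_train2017.json"
check_bbox_random_crop_adjust_polygons(file_root, annotations_file,
                                       batch_size=3, num_iters=4)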
Code Example #4
 def contrast_fn(self, img):
     # Randomly scale the contrast, then clamp the result back into the
     # image's original intensity range.
     min_, max_ = fn.reductions.min(img), fn.reductions.max(img)
     scale = self.random_augmentation(0.15, fn.uniform(range=(0.65, 1.5)), 1.0)
     img = math.clamp(img * scale, min_, max_)
     return img
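
For intuition, a rough NumPy equivalent of this augmentation (a sketch, not
part of the project; contrast_np is a hypothetical name):

import numpy as np

def contrast_np(img, scale):
    # Scale intensities, then clamp back into the image's original value range.
    lo, hi = img.min(), img.max()
    return np.clip(img * scale, lo, hi)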
Code Example #5
]  # closes a list truncated at the top of this excerpt

bitwise_operations = [((lambda x, y: x & y), "&"), ((lambda x, y: x | y), "|"),
                      ((lambda x, y: x ^ y), "^")]

comparisons_operations = [
    ((lambda x, y: x == y), "=="),
    ((lambda x, y: x != y), "!="),
    ((lambda x, y: x < y), "<"),
    ((lambda x, y: x <= y), "<="),
    ((lambda x, y: x > y), ">"),
    ((lambda x, y: x >= y), ">="),
]

# When hi < lo, the observable behaviour matches NumPy's np.clip (see the
# quick check below the list)
ternary_operations = [(((lambda v, lo, hi: math.clamp(v, lo, hi)),
                        (lambda v, lo, hi: np.clip(v, lo, hi))), "clamp")]
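
# Quick check of the hi < lo reference semantics (illustration only, not part
# of the original file): np.clip returns the upper bound everywhere.
_demo = np.clip(np.array([0.0, 5.0, 10.0]), 7.0, 3.0)
assert (_demo == 3.0).all()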


def as_cpu(tl):
    if isinstance(tl, TensorListGPU):
        return tl.as_cpu()
    return tl


def max_dtype(kind, left_dtype, right_dtype):
    return np.dtype(kind + str(max(left_dtype.itemsize, right_dtype.itemsize)))
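
# Illustration only (not part of the original file): two float inputs promote
# to the wider dtype, i.e. 'f' + max(4, 8) itemsize -> dtype('float64').
assert max_dtype('f', np.dtype(np.float32), np.dtype(np.float64)) == np.float64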


def float_bin_promote(left_dtype, right_dtype):
    if 'f' in left_dtype.kind and 'f' not in right_dtype.kind:
        return left_dtype
Code Example #6
                   (((lambda x, y: math.min(x, y)),
                     (lambda x, y: np.minimum(x, y))), "min", default_range),
                   (((lambda x, y: math.max(x, y)),
                     (lambda x, y: np.maximum(x, y))), "max", default_range)]

floaty_operations = [
    (((lambda x, y: x / y), (lambda x, y: x / y)), "/", default_range),
    (((lambda x, y: math.fpow(x, y)), sane_pow), "fpow", pow_range),
    (((lambda x, y: math.atan2(x, y)),
      (lambda x, y: np.arctan2(x, y))), "atan2", default_range)]

bitwise_operations = [((lambda x, y: x & y), "&"), ((lambda x, y: x | y), "|"),
                      ((lambda x, y: x ^ y), "^")]

comparisons_operations = [((lambda x, y: x == y), "=="), ((lambda x, y: x != y), "!="),
                          ((lambda x, y: x < y), "<"), ((lambda x, y: x <= y), "<="),
                          ((lambda x, y: x > y), ">"), ((lambda x, y: x >= y), ">="),]

# When hi < lo, the observable behaviour matches NumPy's np.clip
ternary_operations = [(((lambda v, lo, hi: math.clamp(v, lo, hi)),
                        (lambda v, lo, hi: np.clip(v, lo, hi))), "clamp")]

def as_cpu(tl):
    if isinstance(tl, TensorListGPU):
        return tl.as_cpu()
    return tl


def max_dtype(kind, left_dtype, right_dtype):
    return np.dtype(kind + str(max(left_dtype.itemsize, right_dtype.itemsize)))

def float_bin_promote(left_dtype, right_dtype):
    if 'f' in left_dtype.kind and 'f' not in right_dtype.kind:
        return left_dtype
    if 'f' not in left_dtype.kind and 'f' in right_dtype.kind:
        return right_dtype
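
# Illustration only (not part of the original file): a float operand wins
# promotion over a non-float one.
assert float_bin_promote(np.dtype(np.float32), np.dtype(np.int32)) == np.float32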
Code Example #7
 def contrast_fn(self, img):
     scale = random_augmentation(0.15, fn.random.uniform(range=(0.65, 1.5)),
                                 1.0)
     return math.clamp(img * scale, fn.reductions.min(img),
                       fn.reductions.max(img))
Code Example #8
 def contrast_fn(self, img):
     min_, max_ = fn.reductions.min(img), fn.reductions.max(img)
     scale = self.random_augmentation(RAND_AUG_PROB,
                                      fn.uniform(range=(0.65, 1.5),
                                                 **self.aug_seed_kwargs), 1.0)
     img = math.clamp(img * scale, min_, max_)
     return img