Example #1
def kps_to_BB(kps):
    """
        Determine imgaug bounding box from imgaug keypoints
    """
    extend = 3  # To make the bounding box a little bit bigger
    kpsx = [kp.x for kp in kps.keypoints]
    minx = max(0, int(min(kpsx) - extend))
    maxx = min(imgW, int(max(kpsx) + extend))
    kpsy = [kp.y for kp in kps.keypoints]
    miny = max(0, int(min(kpsy) - extend))
    maxy = min(imgH, int(max(kpsy) + extend))
    if minx == maxx or miny == maxy:
        return None
    else:
        return ia.BoundingBox(x1=minx, y1=miny, x2=maxx, y2=maxy)
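A minimal usage sketch, assuming import imgaug as ia and the module-level imgW/imgH globals that the function reads:

import imgaug as ia

imgW, imgH = 640, 480  # assumed image dimensions
kps = ia.KeypointsOnImage(
    [ia.Keypoint(x=120, y=80), ia.Keypoint(x=200, y=150)],
    shape=(imgH, imgW, 3))
print(kps_to_BB(kps))  # box spanning (117, 77) to (203, 153)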
Example #2
def kps_to_BB(kps):
    """
        Determine the Bbox in the image from keypoints
    """
    extend = 3  # make the bounding box a little bigger
    kpsx = [kp.x for kp in kps.keypoints]  # x value of every keypoint
    minx = max(0, int(min(kpsx) - extend))  # left x
    maxx = min(imgW, int(max(kpsx) + extend))  # right x
    kpsy = [kp.y for kp in kps.keypoints]  # y value of every keypoint
    miny = max(0, int(min(kpsy) - extend))  # top y
    maxy = min(imgH, int(max(kpsy) + extend))  # bottom y
    if minx == maxx or miny == maxy:  # zero-area case -- no box
        return None
    else:
        return ia.BoundingBox(x1=minx, y1=miny, x2=maxx, y2=maxy)  # otherwise return a Bbox
Example #3
def img_augmentation(augmentation, img, bbox):
    """
    Augment image and bounding boxes !!
    :param augmentation: augmentation settings
    :param img: Only one image is needed. Not batch images
    :param bbox: [[x1,y1,x2,y2],[x1,y1,x2,y2]...]
    :return: Returns augment image and bbox
    """

    # img_copy = img.copy()
    image_shape = img.shape
    h, w = image_shape[0:2]

    # Convert the stochastic sequence of augmenters to a deterministic one.
    # The deterministic sequence will always apply the exactly same effects to the images.
    det = augmentation.to_deterministic()
    img_aug = det.augment_image(img)

    ia_bbox = list()
    for bounding_box in bbox:
        x1, y1, x2, y2 = bounding_box
        ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))

    bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape)
    bbs_aug = det.augment_bounding_boxes([bbs])[0]
    # img = bbs_aug.draw_on_image(img)

    after_bbox = list()
    for bounding_box in bbs_aug.bounding_boxes:
        bbox_list = [
            bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int,
            bounding_box.y2_int
        ]

        if bbox_list[0] >= w: bbox_list[0] = w - 1
        if bbox_list[1] >= h: bbox_list[1] = h - 1
        if bbox_list[2] >= w: bbox_list[2] = w - 1
        if bbox_list[3] >= h: bbox_list[3] = h - 1

        if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]:
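            # the augmentation collapsed this box to zero width/height, so
            # retry the whole augmentation with a fresh random draw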
            return img_augmentation(augmentation, img, bbox)

        bbox_list = list(map(lambda x: max(x, 0), bbox_list))
        after_bbox.append(bbox_list)

    assert img_aug.shape == image_shape, "Augmentation shouldn't change image size"

    return img_aug, after_bbox
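A minimal usage sketch for img_augmentation, assuming imgaug is available and using a dummy black image plus one box:

import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa

aug = iaa.Sequential([iaa.Fliplr(1.0), iaa.Affine(translate_px={"x": 10})])
img = np.zeros((100, 200, 3), dtype=np.uint8)
img_aug, boxes_aug = img_augmentation(aug, img, [[20, 30, 60, 80]])
# boxes_aug is a list of clipped integer [x1, y1, x2, y2] boxes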
Example #4
def get_images_bboxes(df=None, path_to_images=PATH_TO_AUG_IMAGES, head=None):
    """
    Group the annotation dataframe by filename and return lists of filenames,
    RGB images, bounding boxes and classes so they can be shown side by side.
    Expected df columns: filename, width, height, xmin, ymin, xmax, ymax, class.
    """

    # make a list of subgroups, each subgroup is of one file
    data = namedtuple('data', ['file', 'object'])
    grouped = df.groupby('filename')
    gd_data = []
    file_names = []
    for file, x in zip(grouped.groups.keys(), grouped.groups):
        gd_data.append(data(file, grouped.get_group(x)))
        file_names.append(file)

    filenames = []
    images = []
    bboxes = []
    classes = []
    for g in gd_data:
        grouped_image = g.object
        H = int(grouped_image['height'].iloc[0])
        W = int(grouped_image['width'].iloc[0])
        filename = grouped_image['filename'].iloc[0]

        # create list of bboxes object to each image
        bboxes_on_an_image = []
        classes_on_an_image = []
        for idx, row in grouped_image.iterrows():
            x1 = int(row['xmin'])
            y1 = int(row['ymin'])
            x2 = int(row['xmax'])
            y2 = int(row['ymax'])
            bboxes_on_an_image.append(ia.BoundingBox(x1, y1, x2, y2))
            classes_on_an_image.append(row['class'])

        bboxes.append(ia.BoundingBoxesOnImage(bboxes_on_an_image,
                                              shape=(H, W)))
        classes.append(classes_on_an_image)
        filenames.append(filename)

        # open an image
        path = os.path.join(path_to_images, filename)
        image = cv.imread(path)
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        images.append(image)

    return filenames, images, bboxes, classes
Example #5
def create_sample():
    while True:
        num_objects = np.random.randint(MIN_OBJECTS_PER_IMAGE,
                                        MAX_OBJECTS_PER_IMAGE + 1)
        ok = False

        for _ in range(MAX_TRIES):
            bnd_boxes = []

            for _ in range(num_objects):
                bird_size = np.random.randint(MIN_SIZE, MAX_SIZE)
                bird_ar = np.random.uniform(MIN_ASPECT_RATIO, MAX_ASPECT_RATIO)
                bird_width = min(IMAGE_SIZE, int(bird_size * np.sqrt(bird_ar)))
                bird_height = min(IMAGE_SIZE,
                                  int(bird_size / np.sqrt(bird_ar)))

                y1 = np.random.randint(0, IMAGE_SIZE - bird_height + 1)
                x1 = np.random.randint(0, IMAGE_SIZE - bird_width + 1)
                y2 = y1 + bird_height
                x2 = x1 + bird_width
                bnd_boxes.append(
                    ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, label='bird'))

            ok = True
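            # reject this layout if any two boxes overlap
            # (BoundingBox.intersection returns None for disjoint boxes)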
            for i, box1 in enumerate(bnd_boxes):
                for j, box2 in enumerate(bnd_boxes):
                    if i != j and box1.intersection(box2):
                        ok = False

            if ok: break
        if ok: break

    image = sample_background()

    for box in bnd_boxes:
        y1 = box.y1_int
        x1 = box.x1_int
        y2 = box.y2_int
        x2 = box.x2_int

        backg_patch = image[y1:y2, x1:x2]
        image[y1:y2, x1:x2], nx1, ny1, nx2, ny2 = draw_object(backg_patch)

        box.x1, box.x2 = nx1 + x1, nx2 + x1
        box.y1, box.y2 = ny1 + y1, ny2 + y1

    bnd_boxes = ia.BoundingBoxesOnImage(bnd_boxes, shape=image.shape)
    return image, bnd_boxes
Example #6
    def may_augment_bbox(self, aug, ori_shape, bboxes):
        imgaug_bboxes = []
        for bbox in bboxes:
            x1, y1, x2, y2 = bbox
            imgaug_bboxes.append(imgaug.BoundingBox(x1=x1, y1=y1, x2=x2,
                                                    y2=y2))
        imgaug_bboxes = aug.augment_bounding_boxes([
            imgaug.BoundingBoxesOnImage(imgaug_bboxes, shape=ori_shape)
        ])[0].clip_out_of_image()

        new_bboxes = []
        for box in imgaug_bboxes.bounding_boxes:
            new_bboxes.append(
                np.array([box.x1, box.y1, box.x2, box.y2], dtype=np.float32))

        return new_bboxes
Example #7
 def _process(self, image, label):
     label = np.reshape(label, (-1, 5))
     label = [list(label[row, :]) for row in range(label.shape[0])]
     bbs = ia.BoundingBoxesOnImage([
         ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, label=class_id)
         for x1, y1, x2, y2, class_id in label
     ],
                                   shape=image.shape)
     # 1. data augmentation
     if self.enhance:
         image, bbs = self._aug_images(image, bbs)
     # 2. resize the image
     image, bbs = self._resize(image, bbs)
     # 3. build the YOLO label
     label = self._to_yolo(bbs)
     return image, label
Example #8
    def test_get_column_names__all_columns(self):
        batch = ia.UnnormalizedBatch(
            images=np.zeros((1, 2, 2, 3), dtype=np.uint8),
            heatmaps=[np.zeros((2, 2, 1), dtype=np.float32)],
            segmentation_maps=[np.zeros((2, 2, 1), dtype=np.int32)],
            keypoints=[[(0, 0)]],
            bounding_boxes=[[ia.BoundingBox(0, 0, 1, 1)]],
            polygons=[[ia.Polygon([(0, 0), (1, 0), (1, 1)])]],
            line_strings=[[ia.LineString([(0, 0), (1, 0)])]])

        names = batch.get_column_names()

        assert names == [
            "images", "heatmaps", "segmentation_maps", "keypoints",
            "bounding_boxes", "polygons", "line_strings"
        ]
Example #9
def parseBoundingBoxes(annotations, imageShape):
	# input: list of annotations, each describing one bounding box (keys: x, y, width, height)
	# output: BoundingBoxesOnImage object with one BoundingBox per annotation
	boundingBoxes = []
	for annotation in annotations:
		xTopLeft = annotation["x"]
		yTopLeft = annotation["y"]
		xBottomRight = xTopLeft + annotation["width"]
		yBottomLeft = yTopLeft + annotation["height"]

		boundingBox = ia.BoundingBox(x1=xTopLeft,y1=yTopLeft,x2=xBottomRight, y2=yBottomLeft)
		
		boundingBoxes.append(boundingBox)
	
	bbs = ia.BoundingBoxesOnImage(boundingBoxes, shape=imageShape)
	return(bbs)
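A minimal usage sketch, assuming import imgaug as ia and annotation dicts with the x/y/width/height keys read in the loop:

import imgaug as ia

annotations = [{"x": 10, "y": 20, "width": 30, "height": 40}]
bbs = parseBoundingBoxes(annotations, imageShape=(100, 100, 3))
# bbs.bounding_boxes[0] spans from (10, 20) to (40, 60)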
Example #10
def generate_augmentations(paths, output_dir, repeats, maximun=-1):
    images = read_yolo_paths(paths)
    seq = aumentation_properties()
    print("Aumentanto {} x {}".format(len(images), repeats))
    results = []
    check_max = 0
    for image in images:
        check_max = check_max + 1
        if check_max >= maximun and maximun != -1: break
        filename = image["image"].split("/")[-1]
        data = cv.imread(image["image"])
        boxes = []
        names = []
        for line in image["boxes"]:
            if len(line) != 5 : continue
            name, x1, y1, x2, y2 = line
            x1, x2, y1, y2 = convert_axis(data.shape,
            (float(x1), float(y1), float(x2), float(y2)))
            boxes.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))
            names.append(name)
        boxes = ia.BoundingBoxesOnImage(boxes, shape=data.shape)
        seq_det = seq.to_deterministic()
        images_aug = seq_det.augment_images([data] * repeats)
        boxes_aug = seq_det.augment_bounding_boxes([boxes] * repeats)
        for i in range(0, len(images_aug)):
            image_aug = images_aug[i]
            box_aug = boxes_aug[i]
            result_filename = "aug-" + str(i) + filename
            cv.imwrite(os.path.join(output_dir, result_filename), image_aug)
            with open(os.path.join(output_dir, result_filename.split(".")[0] + ".txt"), "w") as yolo_txt:
                j = 0
                box_aug = box_aug.cut_out_of_image()
                box = box_aug.bounding_boxes
                for name in names:
                    #print(box[j].is_fully_within_image(data.shape))
                    if box[j].is_fully_within_image(data.shape):
                        x1, y1, x2, y2 = convert_yolo(data.shape, 
                        (box[j].x1, box[j].x2, box[j].y1 ,box[j].y2))
                        yolo_txt.write("%s %.4f %.4f %.4f %.4f\n" %
                        (name, x1, y1, x2, y2) )
                    j = j + 1
            results.append(os.path.join(output_dir, result_filename))

    shuffle_array(results)
    print("Datos generados ", len(results))
    return results 
Example #11
    def cpu_augment(self, imgs, boxes):
        # for bx in boxes:
        #     self.assert_bboxes(bx)
        ia_bb = []
        for n in range(len(imgs)):
            c_boxes = []
            for i in boxes[n]:
                try:
                    c_boxes.append(
                        ia.BoundingBox(x1=i[0], y1=i[1], x2=i[2], y2=i[3]))
                except AssertionError:
                    print('Assertion Error: ', i)
            ia_bb.append(ia.BoundingBoxesOnImage(c_boxes, shape=imgs[n].shape))

        seq = iaa.Sequential([
            iaa.Sometimes(0.5, iaa.AddElementwise((-20, 20), per_channel=1)),
            iaa.Sometimes(0.5,
                          iaa.AdditiveGaussianNoise(scale=(0, 0.10 * 255))),
            iaa.Sometimes(0.5, iaa.Multiply((0.75, 1.25), per_channel=1)),
            iaa.Sometimes(0.5, iaa.MultiplyElementwise((0.75, 1.25))),
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0.0, 1.0))),
            iaa.Fliplr(0.5),
            iaa.Sometimes(
                0.95,
                iaa.SomeOf(1, [
                    iaa.CoarseDropout(p=(0.10, 0.25),
                                      size_percent=(0.25, 0.5)),
                    iaa.CoarseDropout(p=(0.0, 0.15), size_percent=(0.1, 0.25)),
                    iaa.Dropout(p=(0, 0.25)),
                    iaa.CoarseSaltAndPepper(p=(0, 0.25),
                                            size_percent=(0.1, 0.2))
                ])),
            iaa.Affine(scale=iap.Choice(
                [iap.Uniform(0.4, 1), iap.Uniform(1, 3)]),
                       rotate=(-180, 180))
        ])
        seq_det = seq.to_deterministic()
        image_b_aug = seq_det.augment_images(imgs)
        bbs_b_aug = seq_det.augment_bounding_boxes(ia_bb)
        bbs_b_aug = [
            b.remove_out_of_image().cut_out_of_image() for b in bbs_b_aug
        ]
        return image_b_aug, [
            np.array([self.bbox_r(j) for j in i.bounding_boxes])
            for i in bbs_b_aug
        ]
Example #12
def gen_batches(files, bs=5, scale=4.5, must_rotate=True):
    from imgaug.augmentables.batches import UnnormalizedBatch
    batches = []
    trees = []
    for xml_file in files:
        tree = ET.parse(xml_file)
        root = tree.getroot()
        img = root.find('path').text
        img = os.path.join(*img.split("\\"))  # normalize Windows-style separators in the stored path
        raw_img = Image.open(img)
        clean(raw_img)
        raw_img = ImageOps.exif_transpose(raw_img)
        # Reduce image size
        if scale > 1:
            resize_image(raw_img, root, scale)
        img_array = np.array(raw_img)

        bbs = [
            ia.BoundingBox(int(member[4][0].text), int(member[4][1].text),
                           int(member[4][2].text), int(member[4][3].text))
            for member in root.findall('object')
        ]
        # Rotated
        if must_rotate:
            img_aug, bbs_aug = aug_by_value_list([img_array], [bbs],
                                                 fit_output=True,
                                                 rotate=MUST_ROTATE)
        else:
            img_aug, bbs_aug = [img_array], [bbs]
        # img_aug, bbs_aug = aug_by_value_list(img_aug, bbs_aug, scale=MUST_SCALE)
        # img_aug, bbs_aug = [], []
        # Original
        # img_aug.insert(0, img_array)
        # bbs_aug.insert(0, bbs)

        images = [
            img_aug_array for img_aug_array in img_aug for _ in range(bs)
        ]
        batches.append(
            UnnormalizedBatch(images=images,
                              bounding_boxes=[
                                  bbs_aug_array for bbs_aug_array in bbs_aug
                                  for _ in range(bs)
                              ]))
        trees.append(tree)
    return batches, trees
Example #13
 def _measure_uniformity(cls, image, patch_size=5, n_patches=100):
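     # Estimates how uniform the image is: samples n_patches random patches
     # from an image-gradient map and returns 1 / (1 + std of the per-patch
     # standard deviations), so more uniform images score closer to 1.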
     pshalf = (patch_size - 1) // 2
     image_f32 = image.astype(np.float32)
     grad_x = image_f32[:, 1:] - image_f32[:, :-1]
     grad_y = image_f32[1:, :] - image_f32[:-1, :]
     grad = np.abs(grad_x[1:, :] + grad_y[:, 1:])
     points_y = np.random.randint(0, image.shape[0], size=(n_patches, ))
     points_x = np.random.randint(0, image.shape[1], size=(n_patches, ))  # width for x
     stds = []
     for y, x in zip(points_y, points_x):
         bb = ia.BoundingBox(x1=x - pshalf,
                             y1=y - pshalf,
                             x2=x + pshalf,
                             y2=y + pshalf)
         patch = bb.extract_from_image(grad)
         stds.append(np.std(patch))
     return 1 / (1 + np.std(stds))
Example #14
def augment_keypoints(kpts, img, d):
    ia.seed(1)
    #seq = iaa.Sequential([iaa.Affine(rotate=int(d)), iaa.CropAndPad(px=(-75, 0), keep_size=False)]) # rotate by exactly d degrees
    seq = iaa.Sequential([
        iaa.Affine(rotate=int(d)),
        iaa.Affine(translate_px={
            "x": -100,
            "y": -100
        })
    ])
    #seq = iaa.Sequential([iaa.Affine(rotate=int(d))]) # rotate by exactly d degrees
    seq_det = seq.to_deterministic()
    keypoints_og = ia.KeypointsOnImage([
        ia.Keypoint(x=kpts[0], y=kpts[1]),
        ia.Keypoint(x=kpts[2], y=kpts[3]),
        ia.Keypoint(x=kpts[4], y=kpts[5]),
        ia.Keypoint(x=kpts[6], y=kpts[7])
    ],
                                       shape=img.shape)
    #bbs_original = ia.BoundingBoxesOnImage([ia.BoundingBox(x1=kpts[2], x2=kpts[4], y1=kpts[3], y2=kpts[5])], shape=img.shape)
    #keypoints_list.append(kpts)
    keypoints_aug = seq_det.augment_keypoints([keypoints_og])[0]
    for i in range(len(keypoints_og.keypoints)):
        before = keypoints_og.keypoints[i]
        after = keypoints_aug.keypoints[i]
        #print("Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)" % (i, before.x, before.y, after.x, after.y))
    print("keypoints_aug: ", keypoints_aug)
    #bbs_aug = ia.BoundingBoxesOnImage([ia.BoundingBox(x1=keypoints_aug[0], x2=keypoints_aug[2], y1=keypoints_aug[1], y2=keypoints_aug[3])], shape=img.shape)
    new_coords = []
    for kp_idx, keypoint in enumerate(keypoints_aug.keypoints):
        keypoint_old = keypoints_og.keypoints[kp_idx]
        x_old, y_old = keypoint_old.x, keypoint_old.y
        x_new, y_new = keypoint.x, keypoint.y
        #print("[Keypoints for image #%s] before aug: x=%s y=%s | after aug: x=%s y=%s" % (img, x_old, y_old, x_new, y_new))
        new_pair = (int(x_new), int(y_new))
        new_coords.append(new_pair)
    print("new_coords: ", new_coords)
    # new_coords holds (x, y) tuples, so unpack them; the first two augmented
    # keypoints are assumed to be opposite corners of the box (the original
    # indices read the list as if it were flat)
    bbs_aug = ia.BoundingBoxesOnImage([
        ia.BoundingBox(x1=new_coords[0][0],
                       y1=new_coords[0][1],
                       x2=new_coords[1][0],
                       y2=new_coords[1][1])
    ],
                                      shape=img.shape)
    return new_coords, bbs_aug
Example #15
    def augmentImgsTest(self, plates):
        augPlates = []
        for plate in plates:
            plateIdx = plate["plateIdx"]
            plateImg = np.asarray(plate['plateImg'])
            plateBoxes = plate['plateBoxes']
            bbs = []
            seq = iaa.Sequential([
                iaa.Sometimes(0.9, iaa.GaussianBlur(sigma=(0, 0.9))),
                iaa.ContrastNormalization((0.75, 2.0)),
                iaa.AdditiveGaussianNoise(
                    loc=0, scale=(0.0, 0.2 * 255), per_channel=0.6),
                iaa.Multiply((0.8, 1.2), per_channel=0.2),
                iaa.Sometimes(0.8, iaa.Affine(rotate=(-2, 12),
                                              shear=(-2, 15))),
                iaa.Sometimes(0.8, iaa.Affine(shear=(-3, 9)))
            ],
                                 random_order=True)
            seq_det = seq.to_deterministic()

            for box in plateBoxes:
                bbs.append(ia.BoundingBox(box[0], box[1], box[2], box[3]))

            bbsOnImage = ia.BoundingBoxesOnImage(bbs, shape=plateImg.shape)
            imageAug = seq_det.augment_images([plateImg])[0]
            bboxAug = seq_det.augment_bounding_boxes([bbsOnImage])[0]
            bboxAug = bboxAug.remove_out_of_image().cut_out_of_image()

            finalImg = Image.fromarray(imageAug)
            bboxAugFormatted = []
            for idx, box in enumerate(bboxAug.bounding_boxes):
                bboxAugFormatted.append(
                    (box.x1, box.y1, box.x2, box.y2, plateBoxes[idx][4]))

            augPlates.append({
                "plateIdx": plateIdx,
                "plateImg": Image.fromarray(imageAug),
                "plateBoxes": bboxAugFormatted
            })

            # Visualize plate
            if self.visualizePlates:
                self.visualizePlate(finalImg)

        return augPlates
Example #16
    def random_transform_group_entry(self, image: np.ndarray, annotations: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        if self._augmenter is None:
            return image, annotations
        seq_det = self._augmenter.to_deterministic()

        image_aug = seq_det.augment_images([image])[0].astype('float32')

        if annotations.shape[0] > 0:
            bbs = ia.BoundingBoxesOnImage([
                ia.BoundingBox(x1=a[0], y1=a[1], x2=a[2], y2=a[3]) for a in annotations
            ], shape=image.shape)
            bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

            annotations_aug = np.array([np.array([bb.x1, bb.y1, bb.x2, bb.y2, an[4]])
                                        for (bb, an) in zip(bbs_aug.bounding_boxes, annotations)])
            return image_aug, annotations_aug
        else:
            return image_aug, annotations
Example #17
def modify_image_train(img, bb):
    img_height, img_width = img.shape[0], img.shape[1]
    x_min, y_min, width, height = bb
    x1, y1, x2, y2 = to_imgaugbb(x_min, y_min, width, height)
    bb_array = ia.BoundingBoxesOnImage([ia.BoundingBox(x1, y1, x2, y2)],
                                       shape=(img_height, img_width))

    rgb_img = skimage.color.gray2rgb(img)
    imagenet_height, imagenet_width = 224, 224
    seq = iaa.Sequential(
        [iaa.Scale({
            "height": imagenet_height,
            "weight": imagenet_width
        })])
    seq_det = seq.to_deterministic()
    image_aug = seq_det.augment_images([rgb_img])[0]
    bbs_aug = seq_det.augment_bounding_boxes([bb_array])[0]
    return image_aug, bbs_aug
Example #18
    def to_imgaug(self, shape):
        """
        Args:
            shape (tuple): shape of image that boxes belong to

        Example:
            >>> self = Boxes([[25, 30, 15, 10]], 'tlbr')
            >>> bboi = self.to_imgaug((10, 10))
        """
        import imgaug
        if len(self.data.shape) != 2:
            raise ValueError('data must be 2d got {}d'.format(len(self.data.shape)))

        tlbr = self.to_tlbr(copy=False).data
        bboi = imgaug.BoundingBoxesOnImage(
            [imgaug.BoundingBox(x1, y1, x2, y2)
             for x1, y1, x2, y2 in tlbr], shape=shape)
        return bboi
Example #19
    def test_to_batch_in_augmentation__all_columns(self):
        batch = ia.Batch(
            images=np.zeros((1, 2, 2, 3), dtype=np.uint8),
            heatmaps=[
                ia.HeatmapsOnImage(np.zeros((2, 2, 1), dtype=np.float32),
                                   shape=(2, 2, 3))
            ],
            segmentation_maps=[
                ia.SegmentationMapsOnImage(np.zeros((2, 2, 1), dtype=np.int32),
                                           shape=(2, 2, 3))
            ],
            keypoints=[
                ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(2, 2, 3))
            ],
            bounding_boxes=[
                ia.BoundingBoxesOnImage([ia.BoundingBox(0, 0, 1, 1)],
                                        shape=(2, 2, 3))
            ],
            polygons=[
                ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
                                   shape=(2, 2, 3))
            ],
            line_strings=[
                ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
                                      shape=(2, 2, 3))
            ])

        batch_inaug = batch.to_batch_in_augmentation()

        assert isinstance(batch_inaug, ia.BatchInAugmentation)
        assert ia.is_np_array(batch_inaug.images)
        assert batch_inaug.images.shape == (1, 2, 2, 3)
        assert isinstance(batch_inaug.heatmaps[0], ia.HeatmapsOnImage)
        assert isinstance(batch_inaug.segmentation_maps[0],
                          ia.SegmentationMapsOnImage)
        assert isinstance(batch_inaug.keypoints[0], ia.KeypointsOnImage)
        assert isinstance(batch_inaug.bounding_boxes[0],
                          ia.BoundingBoxesOnImage)
        assert isinstance(batch_inaug.polygons[0], ia.PolygonsOnImage)
        assert isinstance(batch_inaug.line_strings[0], ia.LineStringsOnImage)
        assert batch_inaug.get_column_names() == [
            "images", "heatmaps", "segmentation_maps", "keypoints",
            "bounding_boxes", "polygons", "line_strings"
        ]
Example #20
            def apply_augmenters(self, image, target):
                """
                Args:
                    image: image to apply augmentation
                    target: target to apply augmentation
                """
                if self.seq is None:
                    target = np.array(target, dtype=np.float32)
                    target[:, 0] /= image.shape[1]
                    target[:, 1] /= image.shape[0]
                    target[:, 2] /= image.shape[1]
                    target[:, 3] /= image.shape[0]

                else:
                    seq_det = self.seq.to_deterministic()

                    image = seq_det.augment_images([image])[0]

                    boxes = ia.BoundingBoxesOnImage([
                        ia.BoundingBox(x1=target[i][0],
                                       y1=target[i][1],
                                       x2=target[i][2],
                                       y2=target[i][3])
                        for i in range(target.shape[0])
                    ],
                                                    shape=image.shape)
                    boxes = seq_det.augment_bounding_boxes([boxes])[0]

                    image_after = boxes.draw_on_image(image,
                                                      thickness=2,
                                                      color=[0, 0, 255])
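                    # image_after is only a debug visualization of the
                    # augmented boxes; it is not used in the returned target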
                    target = np.array([[
                        box_aug.x1 / image.shape[1], box_aug.y1 /
                        image.shape[0], box_aug.x2 / image.shape[1],
                        box_aug.y2 / image.shape[0], target[i][-1]
                    ] for i, box_aug in enumerate(boxes.bounding_boxes)],
                                      dtype=np.float32)

                if self.is_BGR:
                    image = image[..., ::-1]

                target[:, :4] = target[:, :4].clip(min=0.0, max=1.0)
                image = cv2.resize(image, (self.image_size, self.image_size))
                return image, target
Example #21
def make_augumentation(image_coords, seq=None):
    image = cv.imread('{}/{}'.format(LOAD_IMGS_PATH, image_coords[0]))
    points = []

    for i in image_coords[1]:
        points.append(ia.BoundingBox(i[0], i[1], i[2], i[3]))

    bbs = ia.BoundingBoxesOnImage(points, shape=image.shape)
    seq_det = seq.to_deterministic()
    image_aug = seq_det.augment_images([image])[0]
    bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

    # print coordinates before/after augmentation (see below)
    # use after.x_int and after.y_int to get rounded integer coordinates
    points_aug = []

    h, w = image.shape[:2]
    # print( '{}  {}'.format( w, h ) )
    # print( image_coords[0] )
    for i in range(len(bbs.bounding_boxes)):
        after = bbs_aug.bounding_boxes[i]
        x1 = after.x1_int
        y1 = after.y1_int
        x2 = after.x2_int
        y2 = after.y2_int

        x1 = 0 if x1 < 0 else (w - 1 if x1 >= w else x1)
        y1 = 0 if y1 < 0 else (h - 1 if y1 >= h else y1)

        x2 = 0 if x2 < 0 else (w - 1 if x2 >= w else x2)
        y2 = 0 if y2 < 0 else (h - 1 if y2 >= h else y2)

        if (x1 < 0 or x1 >= w or x2 < 0 or x2 >= w or y1 < 0 or y1 >= h
                or y2 < 0 or y2 >= h):
            raise ValueError('Has incorrect boundings.')

        points_aug.append([x1, y1, x2, y2])

    image_after = bbs_aug.draw_on_image(image_aug,
                                        thickness=2,
                                        color=[0, 0, 255])

    return image_after, image_aug, points_aug
Example #22
 def parser_label(self, image, yolo_label):
     label = []
     for h_index in range(self.cell_size):
         for w_index in range(self.cell_size):
             if yolo_label[h_index, w_index, 0] == 0:
                 continue
             x_center, y_center, w, h = yolo_label[h_index, w_index, 1:5]
             class_id = np.argmax(yolo_label[h_index, w_index, 5:])
             x_1 = int((x_center - 0.5 * w) * self.image_size)
             y_1 = int((y_center - 0.5 * h) * self.image_size)
             x_2 = int((x_center + 0.5 * w) * self.image_size)
             y_2 = int((y_center + 0.5 * h) * self.image_size)
             label.append(
                 ia.BoundingBox(x1=x_1,
                                y1=y_1,
                                x2=x_2,
                                y2=y_2,
                                label=class_id))
     return image, ia.BoundingBoxesOnImage(label, shape=image.shape)
Example #23
def get_crop_dataset(augmenter, image, bboxs, max_h, max_w):
    temp_aug_bbox = []
    for bbox in bboxs:
        temp_aug_bbox.append(ia.BoundingBox(x1=bbox[0], 
                                            x2=bbox[2], 
                                            y1=bbox[1], 
                                            y2=bbox[3]))
    bbs = ia.BoundingBoxesOnImage(temp_aug_bbox, shape=image.shape)

    # keep same aug
    seq_det = augmenter.to_deterministic()
    
    res_img = seq_det.augment_image(image)
    bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]
    res_bboxs = []
    for bbx in bbs_aug:
        if bbx.x1 >= 0 and bbx.y1 >= 0 and bbx.x2 < max_w and bbx.y2 < max_h:
            res_bboxs.append([int(bbx.x1), int(bbx.y1),  int(bbx.x2),  int(bbx.y2)])
    return res_img, res_bboxs
Example #24
def augment_flip(image, bbox):
    aug = iaa.Sequential([iaa.Fliplr(1.0)])

    bbs = ia.BoundingBoxesOnImage([
        ia.BoundingBox(x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])], shape=image.shape)

    aug = aug.to_deterministic()
    image_aug = aug.augment_image(image)
    image_aug = image_aug.copy()
    bbs_aug = aug.augment_bounding_boxes([bbs])[0]
    b = bbs_aug.bounding_boxes
    bbs_aug = [b[0].x1, b[0].y1, b[0].x2, b[0].y2]
    bbs_aug = np.asarray(bbs_aug)

    bbs_aug[0] = bbs_aug[0] if bbs_aug[0] > 0 else 0
    bbs_aug[1] = bbs_aug[1] if bbs_aug[1] > 0 else 0
    bbs_aug[2] = bbs_aug[2] if bbs_aug[2] < size else size
    bbs_aug[3] = bbs_aug[3] if bbs_aug[3] < size else size
    return image_aug, bbs_aug
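A minimal usage sketch; size is assumed to be a module-level global holding the square image side length, since augment_flip reads it when clamping:

import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa

size = 128  # assumed global read by augment_flip
image = np.zeros((size, size, 3), dtype=np.uint8)
img_aug, bb_aug = augment_flip(image, [20, 30, 60, 80])
# bb_aug is the horizontally mirrored box as a numpy array [x1, y1, x2, y2]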
Example #25
    def get_bounding_boxes(self, width: int,
                           height: int) -> list[ia.BoundingBox]:
        bounding_boxes = []
        for group in self.keypoint_groups:
            group_x = [k.x for k in group]
            min_x = max(0, int(min(group_x) - BOUNDING_BOX_BUFFER))
            max_x = min(width, int(max(group_x) + BOUNDING_BOX_BUFFER))

            group_y = [k.y for k in group]
            min_y = max(0, int(min(group_y) - BOUNDING_BOX_BUFFER))
            max_y = min(height, int(max(group_y) + BOUNDING_BOX_BUFFER))

            bounding_boxes.append(
                ia.BoundingBox(x1=min_x,
                               y1=min_y,
                               x2=max_x,
                               y2=max_y,
                               label=self.name))
        return bounding_boxes
Example #26
def loadData(imgPath, txtPath):
	outBoxes = [] 
	tmpBoxes = []
	
	img = cv2.imread(imgPath)
	h, w , c = img.shape
	outImg = img.reshape(1, h, w, c)
	
	with open(txtPath, "r") as f:
		for line in f.readlines():
			bbox = line.strip().split(" ")
			label = bbox[0]
			cx, cy, bw, bh = float(bbox[1]) * w, float(bbox[2]) * h, float(bbox[3]) * w, float(bbox[4]) * h
			x1, y1 = cx - 0.5 * bw, cy - 0.5 * bh
			x2, y2 = cx + 0.5 * bw, cy + 0.5 * bh
			tmpBoxes.append(ia.BoundingBox(x1, y1, x2, y2, label))
			
	outBoxes.append(tmpBoxes)
	return outImg, outBoxes
Example #27
def augmentation(images, annotations, augmentation, save_dir, filename):
    # print("\n", augmentation, "\n")
    for idx in range(len(images)):
        image = images[idx]
        boxes = annotations[idx][0]

        ia_bounding_boxes = []
        for box in boxes:
            xyxy = xywh2xyxy(box, Width, Height)
            # print(xyxy)
            ia_bounding_boxes.append(
                ia.BoundingBox(x1=xyxy[0],
                               y1=xyxy[1],
                               x2=xyxy[2],
                               y2=xyxy[3],
                               label=box[0]))

        bbs = ia.BoundingBoxesOnImage(ia_bounding_boxes, shape=image.shape)

        seq = iaa.Sequential(augmentation)

        seq_det = seq.to_deterministic()

        image_aug = seq_det.augment_images([image])[0]
        bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

        new_image_file = save_dir + filename + annotations[idx][2]
        cv2.imwrite(new_image_file, image_aug)

        h, w = np.shape(image_aug)[0:2]
        # print("h, w : ", h, w)
        # voc_writer = Writer(new_image_file, w, h)
        with open(save_dir + filename + annotations[idx][1], "w") as f:
            for i in range(len(bbs_aug.bounding_boxes)):
                bb_box = bbs_aug.bounding_boxes[i]
                x_min = bb_box.x1
                y_min = bb_box.y1
                x_max = bb_box.x2
                y_max = bb_box.y2
                cls_id = bb_box.label
                x_cen, y_cen, w, h = xyxy2xywh(x_min, y_min, x_max, y_max)
                f.write("%d %.06f %.06f %.06f %.06f\n" %
                        (cls_id, x_cen, y_cen, w, h))
Example #28
    def test_two_images_and_bounding_boxes(self):
        rng = iarandom.RNG(0)
        images = rng.integers(0, 256, size=(2, 256, 256, 3), dtype=np.uint8)
        bbs = []
        for x in np.linspace(0, 256, 5):
            for y in np.linspace(0, 256, 5):
                bbs.append(ia.BoundingBox(x1=x, y1=y, x2=x + 20, y2=y + 20))
        bbsoi1 = ia.BoundingBoxesOnImage(bbs, shape=images[0].shape)
        bbsoi2 = bbsoi1.shift(left=20)
        image1_w_overlay = bbsoi1.draw_on_image(images[0])
        image2_w_overlay = bbsoi2.draw_on_image(images[1])

        debug_image = iaa.draw_debug_image(images,
                                           bounding_boxes=[bbsoi1, bbsoi2])

        assert self._image_contains(images[0, ...], debug_image)
        assert self._image_contains(images[1, ...], debug_image)
        assert self._image_contains(image1_w_overlay, debug_image)
        assert self._image_contains(image2_w_overlay, debug_image)
Example #29
    def pull_item(self, index):
        img_id = os.path.join(self.image_path, self.images[index])
        boxes = self.boxes[index]
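        # every box gets class label 0 (np.ones(len(boxes)) * 0 is just zeros)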
        labels = np.ones(len(boxes)) * 0

        if len(boxes) == 0:
            return None, None, None
        img = io.imread(img_id)
        box = np.asarray(boxes)
        if True:
            ig, window, scale, padding, crop = utils.resize_image_fixed_size(
                img, self.image_size)
            if len(labels) == 0:
                return ig, box, labels
            box = box * scale
            box[:, 0] = box[:, 0] + padding[1][0]
            box[:, 1] = box[:, 1] + padding[0][0]
            box[:, 2] = box[:, 2] + padding[1][1]
            box[:, 3] = box[:, 3] + padding[0][1]

        bb = []
        for ix, x in enumerate(box):
            bb.append(ia.BoundingBox(x[0], x[1], x[2], x[3], labels[ix]))
        bbs = ia.BoundingBoxesOnImage(bb, shape=self.image_size)
        seq_det = self.img_aug.to_deterministic()

        image_aug = seq_det.augment_images([ig])[0]
        bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]
        box = []
        labels = []
        for i in range(len(bbs.bounding_boxes)):
            after = bbs_aug.bounding_boxes[i]
            box.append([after.x1, after.y1, after.x2, after.y2])
            labels.append(after.label)

        box = np.asarray(box)
        box = box / np.asarray([
            self.image_size[1], self.image_size[0], self.image_size[1],
            self.image_size[0]
        ])
        box = np.clip(box, 0, 1)
        return image_aug, box, labels
Example #30
    def aug_image(self, train_instance, categories, jitter):
        image_ann, annotations = train_instance
        image = cv2.imread(image_ann['file_name'])
        if image is None:
            # cv2.imread returns None for unreadable paths; report it before
            # the .shape access below fails
            print('Cannot find ', image_ann['file_name'])
        h, w = image.shape[:2]

        categories = {
            category['id']: category['name']
            for category in categories
        }

        aug_pipe_deterministic = self.aug_pipe.to_deterministic()
        all_objs = [{
            'xmin': int(w * x['bbox'][0][0]),
            'ymin': int(h * x['bbox'][0][1]),
            'xmax': int(w * x['bbox'][1][0]),
            'ymax': int(h * x['bbox'][1][1]),
            'name': categories[x['category_id']]
        } for x in annotations if x['category_id'] in categories]

        bbs = ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=obj['xmin'],
                           y1=obj['ymin'],
                           x2=obj['xmax'],
                           y2=obj['ymax'],
                           label=obj['name']) for obj in all_objs
        ],
                                      shape=image.shape)

        image = cv2.resize(image,
                           (self.config['IMAGE_H'], self.config['IMAGE_W']))
        image = np.copy(image)
        bbs = bbs.on(image)

        if jitter:
            image = aug_pipe_deterministic.augment_image(image)
            bbs = aug_pipe_deterministic.augment_bounding_boxes([bbs])[0] \
                .cut_out_of_image().remove_out_of_image()

        return image, bbs.bounding_boxes