    def cutImageBackground(self, image):
        # Scale the image up so that it fully covers the input dimensions
        # while keeping its aspect ratio.
        image_info = ImageInfo(image)
        scale_x = constants.input_width / image_info.width
        scale_y = constants.input_height / image_info.height
        if scale_x > scale_y:
            image = cv2.resize(
                image,
                (constants.input_width, int(image_info.height * scale_x)))
        else:
            image = cv2.resize(
                image,
                (int(image_info.width * scale_y), constants.input_height))
        image_info = ImageInfo(image)

        # Pick a random top-left corner and crop an input-sized window.
        x_seed = random.uniform(0, 1) * (image_info.width - constants.input_width)
        initial_width = int(x_seed)
        final_width = int(x_seed + constants.input_width)

        y_seed = random.uniform(0, 1) * (image_info.height - constants.input_height)
        initial_height = int(y_seed)
        final_height = int(y_seed + constants.input_height)
        return image[initial_height:final_height, initial_width:final_width, :]
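The method above follows a common scale-to-cover-then-random-crop augmentation pattern. As a minimal standalone sketch of the same idea, independent of ImageInfo and constants (the function name random_cover_crop is mine, not from the source):

import math
import random

import cv2


def random_cover_crop(image, target_width, target_height):
    # Scale so the image covers the target size, then crop a random window.
    height, width = image.shape[:2]
    scale = max(target_width / width, target_height / height)
    new_width = max(target_width, int(math.ceil(width * scale)))
    new_height = max(target_height, int(math.ceil(height * scale)))
    resized = cv2.resize(image, (new_width, new_height))
    x = random.randint(0, new_width - target_width)
    y = random.randint(0, new_height - target_height)
    return resized[y:y + target_height, x:x + target_width]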
    def coverInputDimensions(self, image):
        # Upscale the image, preserving aspect ratio, until both sides are at
        # least as large as the network input dimensions.
        image_info = ImageInfo(image)
        if image_info.width < constants.input_width:
            image = cv2.resize(
                image, (constants.input_width,
                        int(constants.input_width / image_info.aspect_ratio)))
        image_info = ImageInfo(image)
        if image_info.height < constants.input_height:
            image = cv2.resize(
                image, (int(constants.input_height * image_info.aspect_ratio),
                        constants.input_height))
        return image
    def removeBackgroundInsideMainObject(self, original_image, contours, mask):
        original_image_info = ImageInfo(original_image)
        biggest_area, _ = contours[0]
        background_subtracted_mask = self.detectAndRemoveBackgroundColor(
            original_image)
        for contour_index in range(1, len(contours)):
            (area, contour) = contours[contour_index]
            first_pixel_in_contour = contour[0][0]
            # contour points are (x, y); numpy indexing expects (row, column)
            first_pixel_in_contour = (first_pixel_in_contour[1],
                                      first_pixel_in_contour[0])
            # If the contour is at least 50% of the size of the biggest
            # element, it is probably another object, provided it does not
            # overlap the detected background.
            if area / biggest_area > 0.5 and background_subtracted_mask[
                    first_pixel_in_contour] != 0:
                cv2.fillPoly(mask, [contour], 255)
            else:
                '''
                Check whether the contour belongs to the background: if the
                background_subtracted_mask contains zeros inside the contour,
                the contour is part of the background.
                '''
                if contour_index == 1:
                    background_subtracted_mask[background_subtracted_mask == 1] = 3
                    background_subtracted_mask[background_subtracted_mask == 2] = 3
                contour_canvas = self.image_utils.blankImage(
                    original_image_info.width, original_image_info.height)
                cv2.fillPoly(contour_canvas, [contour], 1)
                overlapping_areas = cv2.add(background_subtracted_mask,
                                            contour_canvas)
                mask[overlapping_areas == 1] = 0
    def detectAndRemoveBackgroundColor(self, image):
        image_info = ImageInfo(image)
        # Sample the four corner pixels; a flat background shows up there.
        color_borders = [
            image[0][0], image[image_info.height - 1][0],
            image[0][image_info.width - 1],
            image[image_info.height - 1][image_info.width - 1]
        ]
        # Start from a fully-white mask and subtract each corner-color range.
        result = 255
        for color_border in color_borders:
            result = cv2.subtract(
                result,
                cv2.inRange(image, cv2.subtract(color_border, 2),
                            cv2.add(color_border, 1)))
        # Clean up speckle noise in the mask.
        result = cv2.GaussianBlur(result, (5, 5), 3)
        result = cv2.erode(result, None, iterations=1)
        result = cv2.morphologyEx(result, cv2.MORPH_OPEN, None, iterations=3)
        areas_and_contours = self.findContours(result)
        # Fill the biggest contour as foreground; secondary contours above the
        # relative-area threshold are cleared, smaller ones are filled in.
        cv2.drawContours(result, [areas_and_contours[0][1]], -1, (255), -1)
        for i in range(1, len(areas_and_contours)):
            (area, contour) = areas_and_contours[i]
            if area / areas_and_contours[0][0] > self.object_area_threshold:
                color = 0
            else:
                color = 255
            cv2.drawContours(result, [areas_and_contours[i][1]], -1, color, -1)
        return result
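The corner-sampling heuristic can be isolated into a small self-contained sketch: sample the four corner colors and mask out every pixel close to any of them. The helper name corner_color_mask and its symmetric tolerance (the method above uses a slightly asymmetric range) are my assumptions, and a 3-channel image is assumed:

import cv2
import numpy as np


def corner_color_mask(image, tolerance=2):
    # Start from an all-foreground mask and remove pixels whose color is
    # within `tolerance` of any of the four corner colors.
    height, width = image.shape[:2]
    corners = [image[0, 0], image[height - 1, 0],
               image[0, width - 1], image[height - 1, width - 1]]
    mask = np.full((height, width), 255, dtype=np.uint8)
    for color in corners:
        color = color.astype(np.int32)
        lower = np.clip(color - tolerance, 0, 255).astype(np.uint8)
        upper = np.clip(color + tolerance, 0, 255).astype(np.uint8)
        mask = cv2.subtract(mask, cv2.inRange(image, lower, upper))
    return mask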
    def removeFlatBackgroundFromRGB(self, image, full_computation=True):
        if len(image.shape) < 3:
            raise ValueError('Color channel not found in image')
        if image.shape[2] == 4:
            '''
            If the alpha channel contains at least one pixel that is not fully
            opaque, then the alpha channel is truly an alpha channel.
            '''
            if np.any(image[:, :, 3] != 255):
                return image
            else:
                (b, g, r, _) = cv2.split(image)
                image = cv2.merge([b, g, r])
        image_info = ImageInfo(image)
        blurred_image = cv2.GaussianBlur(image, (5, 5), 0)
        sobel_image = self.applySobelFilter(blurred_image)
        contours = self.findContours(sobel_image)
        mask = self.image_utils.blankImage(image_info.width, image_info.height)
        if len(contours) == 0:
            # Fall back to a contour covering the whole image, with the
            # corners ordered so fillPoly traces a proper rectangle.
            contours = [
                (0,
                 np.array([[0, 0], [image_info.width - 1, 0],
                           [image_info.width - 1, image_info.height - 1],
                           [0, image_info.height - 1]], dtype=np.int32))
            ]
        _, biggest_contour = contours[0]
        cv2.fillPoly(mask, [biggest_contour], 255)
        if full_computation and len(contours) > 1:
            self.removeBackgroundInsideMainObject(blurred_image, contours,
                                                  mask)
        mask = cv2.erode(mask, None, iterations=2)
        b, g, r = cv2.split(image)
        rgba = [b, g, r, mask]
        rgba = cv2.merge(rgba, 4)
        return rgba
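The method depends on self.applySobelFilter, which is not included in this snippet. A rough stand-in using plain OpenCV Sobel derivatives (my assumption, not the repository's actual implementation) could be:

import cv2
import numpy as np


def sobel_edge_map(image):
    # Combine horizontal and vertical Sobel responses into an 8-bit edge map.
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grad_x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3)
    magnitude = cv2.magnitude(grad_x, grad_y)
    return np.uint8(np.clip(magnitude, 0, 255))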
Example #6
    def pasteRGBAimageIntoRGBimage(self,
                                   rgba_image,
                                   rgb_image,
                                   x_offset,
                                   y_offset,
                                   include_alpha_channel=False):
        # Split the RGBA image into its color planes and its alpha mask, and
        # alpha-blend the color planes onto the background.
        image_info = ImageInfo(rgba_image)
        object_rgb = rgba_image[:, :, 0:3]
        object_mask = rgba_image[:, :, 3]
        rgb_image = self._pasteImage(object_rgb, object_mask, rgb_image,
                                     x_offset, y_offset)

        # Also return a full-size, single-channel mask marking where the
        # object was pasted.
        image_info_rgb = ImageInfo(rgb_image)
        mask = self.blankImage(image_info_rgb.width, image_info_rgb.height, 1)
        mask[y_offset:image_info.height + y_offset,
             x_offset:image_info.width + x_offset, 0] = object_mask[:, :]
        return rgb_image, mask
Example #7

    def extractConnectedComponents(self, class_index, mask):
        # Skip near-empty masks; otherwise return the bounding box of the
        # first external contour as [class, x_min, y_min, x_max, y_max].
        if mask.sum() > 10.00:
            mask = (mask * 255).astype(np.uint8)
            # Note: the 3-value unpacking matches the OpenCV 3.x signature of
            # findContours; OpenCV 4.x returns only (contours, hierarchy).
            _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
            x, y, w, h = cv2.boundingRect(contours[0])
            return torch.IntTensor([class_index, x, y, x + w, y + h])
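If the snippet needs to run under more than one OpenCV release, a version-agnostic wrapper (a common compatibility idiom, not from the source) looks like:

import cv2


def find_contours_compat(mask):
    # OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return
    # (contours, hierarchy). Taking the last two elements handles both.
    result = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                              cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = result[-2:]
    return contours, hierarchy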
Example #8
    def _pasteImage(self, pasted_image, pasted_image_mask, image_background,
                    x_offset, y_offset):
        pasted_image_info = ImageInfo(pasted_image)
        image_background_info = ImageInfo(image_background)
        image_background_bbox = BoundingBox(
            top=0,
            left=0,
            bottom=image_background_info.height,
            right=image_background_info.width)
        pasted_image_bbox = BoundingBox(
            top=y_offset,
            left=x_offset,
            bottom=y_offset + pasted_image_info.height,
            right=x_offset + pasted_image_info.width)
        intersection_bbox = pasted_image_bbox.intersect(image_background_bbox)

        pasted_image_y_offset_from, pasted_image_x_offset_from, pasted_image_y_offset_to, pasted_image_x_offset_to = 0, 0, 0, 0

        # If the pasted image starts off-canvas, skip the part that falls
        # outside the background.
        if image_background_bbox.left > pasted_image_bbox.left:
            pasted_image_x_offset_from = -pasted_image_bbox.left
        if image_background_bbox.top > pasted_image_bbox.top:
            pasted_image_y_offset_from = -pasted_image_bbox.top
        pasted_image_x_offset_to = min(
            pasted_image_x_offset_from + intersection_bbox.width - 1,
            pasted_image_info.width)
        pasted_image_y_offset_to = min(
            pasted_image_y_offset_from + intersection_bbox.height - 1,
            pasted_image_info.height)

        image_background_y_offset_from, image_background_x_offset_from, image_background_y_offset_to, image_background_x_offset_to = 0, 0, 0, 0
        if x_offset >= 0:
            image_background_x_offset_from = x_offset
        if y_offset >= 0:
            image_background_y_offset_from = y_offset
        image_background_x_offset_to = image_background_x_offset_from + pasted_image_x_offset_to - pasted_image_x_offset_from
        image_background_y_offset_to = image_background_y_offset_from + pasted_image_y_offset_to - pasted_image_y_offset_from

        # Alpha-blend the overlapping region channel by channel:
        # out = pasted * alpha + background * (1 - alpha), alpha in [0, 1].
        alpha = pasted_image_mask[
            pasted_image_y_offset_from:pasted_image_y_offset_to,
            pasted_image_x_offset_from:pasted_image_x_offset_to] / 255
        for channel_index in range(3):
            pasted_region = pasted_image[
                pasted_image_y_offset_from:pasted_image_y_offset_to,
                pasted_image_x_offset_from:pasted_image_x_offset_to,
                channel_index]
            background_region = image_background[
                image_background_y_offset_from:image_background_y_offset_to,
                image_background_x_offset_from:image_background_x_offset_to,
                channel_index]
            image_background[
                image_background_y_offset_from:image_background_y_offset_to,
                image_background_x_offset_from:image_background_x_offset_to,
                channel_index] = (pasted_region * alpha +
                                  background_region * (1 - alpha))
        return image_background
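The per-channel loop implements standard "over" compositing, out = fg * a + bg * (1 - a), with the 8-bit mask rescaled to [0, 1]. The same blend, vectorized over all channels at once (a sketch assuming the three arrays already have matching spatial shapes):

import numpy as np


def alpha_blend(foreground, background, mask):
    # `mask` is uint8 in [0, 255]; broadcast it across the color channels.
    alpha = (mask.astype(np.float32) / 255.0)[..., np.newaxis]
    blended = (foreground.astype(np.float32) * alpha +
               background.astype(np.float32) * (1.0 - alpha))
    return blended.astype(np.uint8)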
Example #9
    def applyContrastAndBrightness(self, image):
        channels = ImageInfo(image).channels
        # With 50% probability, scale every channel by a random contrast gain.
        distort = bool(random.getrandbits(1))
        if distort:
            contrast_parameter = random.uniform(0.1, 2.0)
            image = cv2.merge([
                cv2.multiply(image[:, :, channel_index], contrast_parameter)
                for channel_index in range(channels)
            ])
        # With 50% probability, shift every channel by a random brightness
        # offset bounded by half the mean intensity.
        distort = bool(random.getrandbits(1))
        if distort:
            brightness = random.uniform(-int(np.mean(image) / 2.0),
                                        int(np.mean(image) / 2.0))
            image = cv2.merge([
                cv2.add(image[:, :, channel_index], brightness)
                for channel_index in range(channels)
            ])
        return image
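Both random distortions are instances of the linear transform g(x) = alpha * x + beta. OpenCV bundles that transform into cv2.convertScaleAbs, so a compact variant could look like the sketch below; note that convertScaleAbs takes an absolute value instead of clamping negatives at zero, so strongly darkened pixels behave slightly differently than with cv2.add:

import random

import cv2
import numpy as np


def random_contrast_brightness(image):
    # Random contrast gain and brightness shift in one fused call.
    alpha = random.uniform(0.1, 2.0)       # contrast gain
    limit = np.mean(image) / 2.0
    beta = random.uniform(-limit, limit)   # brightness shift
    return cv2.convertScaleAbs(image, alpha=alpha, beta=beta)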
Example #10
    def distortImage(self, item_image):
        item_image_info = ImageInfo(item_image)
        # Compose scale, rototranslation, and perspective into one homography.
        (scale_x, scale_y) = self.getScaleParams(item_image_info.width,
                                                 item_image_info.height)
        scale_matrix = self.getScaleMatrix(scale_x, scale_y)
        rototranslation_matrix = self.getScaledRotoTranslationMatrix(
            scale_x, scale_y, item_image_info.width, item_image_info.height)
        perspective_matrix = self.getPerspectiveMatrix()
        homography_matrix = np.dot(scale_matrix,
                                   np.dot(rototranslation_matrix,
                                          perspective_matrix))

        transformed_image = cv2.warpPerspective(
            item_image, homography_matrix,
            (constants.input_width, constants.input_height))
        if item_image_info.channels == 4:
            # Re-attach the warped alpha channel to the transformed image.
            alpha_channel = transformed_image[:, :, 3]
            return self.image_utils.addAlphaChannelToImage(
                transformed_image, alpha_channel)
        return transformed_image
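The homography here is the matrix product of three simpler transforms. As a minimal illustration of building one such factor, here is a rotation about the image center lifted to a 3x3 homography (helper name is mine):

import cv2
import numpy as np


def rotation_homography(width, height, angle_degrees):
    # cv2.getRotationMatrix2D yields a 2x3 affine matrix; append the third
    # row [0, 0, 1] so it can be composed and used with warpPerspective.
    rotation = cv2.getRotationMatrix2D((width / 2.0, height / 2.0),
                                       angle_degrees, 1.0)
    return np.vstack([rotation, [0.0, 0.0, 1.0]])

# e.g.: warped = cv2.warpPerspective(image, rotation_homography(w, h, 15.0), (w, h))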
Example #11
    def testContrastBrightnessDistortion(self):
        image_utils = ImageUtils()
        image_distortions = ImageDistortions()
        image_base = cv2.imread('./test/data/images/square/square_1.png')
        expected_brightened_image = cv2.imread(
            './test/data/images/square/distortions/square_1_brightness.png',
            cv2.IMREAD_UNCHANGED)
        image_base_with_alpha_channel = image_utils.addAlphaChannelToImage(
            image_base)
        brightened_image = image_distortions.distortImage(
            image_base_with_alpha_channel)
        difference_of_images = cv2.subtract(brightened_image,
                                            expected_brightened_image)

        # Fewer than 10 pixel values may differ from the reference by more
        # than 10 intensity levels.
        self.assertTrue(np.count_nonzero(difference_of_images > 10) < 10)
Example #12
    def inferenceOnFile(self, full_image_path):
        input_image = cv2.imread(full_image_path)
        if input_image is not None:
            image_info = ImageInfo(input_image)
            # Only 3-channel (BGR) images are supported for inference.
            if image_info.channels == 3:
                if self.visual_logging:
                    cv2.imshow('input_image', input_image)
                objects = self.inferencer.inferenceOnImage(
                    self.model, input_image)
                self.displayResults(objects, input_image, full_image_path)
                if self.visual_logging:
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
        else:
            sys.stderr.write(
                f"Error reading image in path {full_image_path}\n")
            sys.exit(-1)
Example #13
    def paddingScale(self,
                     input_image,
                     input_height=constants.input_height,
                     input_width=constants.input_width):
        image_info = ImageInfo(input_image)
        # Scale the image to fit inside the input field, keeping aspect ratio.
        if input_width / input_height > image_info.aspect_ratio:
            new_height = input_height
            new_width = int(input_height * image_info.aspect_ratio)
        else:
            new_width = input_width
            new_height = int(input_width / image_info.aspect_ratio)
        input_image_scaled = cv2.resize(input_image, (new_width, new_height))
        # blankImage takes (width, height, channels), as in the calls above.
        new_image = self.blankImage(input_width, input_height,
                                    image_info.channels)
        if image_info.channels != 4:
            input_image_scaled = self.addAlphaChannelToImage(
                input_image_scaled)
        # Center the scaled image on the blank canvas.
        input_image_scaled, _ = self.pasteRGBAimageIntoRGBimage(
            input_image_scaled, new_image,
            int((input_width - new_width) / 2),
            int((input_height - new_height) / 2), True)
        return input_image_scaled, new_height, new_width
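paddingScale is a letterboxing step: an aspect-preserving resize plus centered padding on a blank canvas. A hedged usage sketch, assuming the owning class is ImageUtils as the other snippets suggest (the image path is a placeholder):

image_utils = ImageUtils()
photo = cv2.imread('street_scene.jpg')  # hypothetical path
padded, scaled_height, scaled_width = image_utils.paddingScale(photo)
# `padded` matches the network input size; scaled_height/scaled_width give
# the region the original image occupies, e.g. to map boxes back later.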