Example #1
0
    def get_search_feature(self, image, object_rect):
        """Clip an enlarged search patch around *object_rect* and extract its feature.

        The search window is the object rectangle scaled from its center by the
        feature-size / convolution-size ratio, so the convolution output covers
        the object plus surrounding context.

        :param image: full source frame (BGR image)
        :param object_rect: current object rectangle (project Rect type)
        :return: tuple of (search_rect, search_patch_bgr, feature) where the
                 feature is the first (and only) entry from the extractor.
        """
        ratio_w = self.feature_size_w / float(self.convolution_w)
        ratio_h = self.feature_size_h / float(self.convolution_h)
        search_rect = object_rect.get_copy().scale_from_center(ratio_w, ratio_h)

        patch_bgr = clip_image(image, search_rect)
        net_input = cv2.resize(patch_bgr, (self.input_search_w, self.input_search_h))

        # Optional visualization of the clipped patch.
        if self._show_search_bgr_fid:
            display.show_image(patch_bgr, self._show_search_bgr_fid, 'Train & search patch')

        features = self.extractor.extract_multiple_features([net_input])
        return search_rect, patch_bgr, features[0]
Example #2
0
def main():
    """
    Interactive driver used to test the imaging / clustering pipeline.

    Walks through project creation, optional image creation, display,
    classification and visualization, prompting the user at each step.
    """

    # 1. create the project (alternative project: 'kilimanjaro')
    project = ProjectManager('sanfrancisco')

    # show_grid_results(project)

    # 3. optionally create the images, either in batch or one by one
    if input('Do you want to create the images (y/n)?: ') == 'y':
        if input('Batch create(y/n)?: ') == 'y':
            project.batch_create_imagery()
        else:
            image_creator.create_images(project)

    # 4. optionally display the imagery
    if input('Do you want to display the images (y/n)?: ') == 'y':
        show_image(project, 'rgb', cropped=False)

    # clustering parameters shared by steps 5 and 6
    n_clusters = 7
    use_cropped = True
    img_type = 'rgb'

    # 5. optionally classify the image (k-means; alternatives kept for reference)
    if input('Do you want to classify the images (y/n)?: ') == 'y':
        kmeans_cluster(project, n_clusters, img_type, use_cropped)
        # gmm_cluster(project, n_clusters, img_type, use_cropped)
        # dbscan_cluster(project, 5, 100, img_type, use_cropped)
        # plot_cost_function(project, img_type)

    # 6. optionally show the classified image
    if input('Do you want to show the classified images (y/n)?: ') == 'y':
        # convert_to_png(project, 'ndvi', False, True, n_clusters)
        show_clustering(project, use_cropped)
Example #3
0
    def get_scaled_search_feature(self, image, object_rect):
        """Extract search features at multiple scales around *object_rect*.

        Builds ``2 * self.scale_test_num + 1`` candidate rectangles centered on
        the object, stepping the width/height by ``scale_ratio`` per step, then
        clips and resizes a search patch for each and runs the extractor on the
        whole batch.

        :param image: full source frame (BGR image)
        :param object_rect: current object rectangle (project Rect type)
        :return: tuple of (search_rect_list, search_bgr_list, search_features,
                 scaled_object_rects); lists may be empty if every scale was
                 rejected as too small.
        """
        _scale_step_w = max(1, round(object_rect.w * self.scale_ratio))
        _scale_step_h = max(1, round(object_rect.h * self.scale_ratio))
        # The center is the same for every scale: hoist it out of the loop.
        cx, cy = object_rect.get_center()

        scaled_object_rects = []
        for i in range(2 * self.scale_test_num + 1):
            w = object_rect.w + _scale_step_w * (i - self.scale_test_num)
            h = object_rect.h + _scale_step_h * (i - self.scale_test_num)
            if w < 5 or h < 5:
                # Too small to yield a meaningful patch; skip this scale.
                print('Warning: w < 5 or h < 5')
                continue
            tl_x = round(cx - (w - 1) / 2.0)
            tl_y = round(cy - (h - 1) / 2.0)
            scaled_object_rects.append(Rect(tl_x, tl_y, w, h))

        _search_ratio_w = self.feature_size_w / float(self.convolution_w)
        _search_ratio_h = self.feature_size_h / float(self.convolution_h)

        _search_rect_list = []
        _search_bgr_list = []
        _search_input_list = []
        for _scaled_rect in scaled_object_rects:
            _search_rect = _scaled_rect.get_copy().scale_from_center(_search_ratio_w,
                                                                     _search_ratio_h)
            _search_bgr = clip_image(image, _search_rect)
            _search_input = cv2.resize(_search_bgr, (self.input_search_w, self.input_search_h))

            _search_rect_list.append(_search_rect)
            _search_bgr_list.append(_search_bgr)
            _search_input_list.append(_search_input)

        # Bug fix: every scale may have been rejected above, in which case
        # indexing _search_bgr_list[0] would raise IndexError.
        if self._show_search_bgr_fid and _search_bgr_list:
            display.show_image(_search_bgr_list[0], self._show_search_bgr_fid, 'Train & search patch')

        _search_features = self.extractor.extract_multiple_features(_search_input_list)
        return _search_rect_list, _search_bgr_list, _search_features, scaled_object_rects
Example #4
0
 def test_instance(self):
     """Smoke test: show_image returns an object wrappable as a Qt widget."""
     plot = show_image(1024, 512, None)
     widget = sip.wrapinstance(plot.pyqwidget(), Qt.QWidget)
Exemple #5
0
def segment_contours(plate):
    """
    Finds the contours satisfying the required constraints

    NOTE(review): this definition is shadowed by an identical (re-formatted)
    ``segment_contours`` defined later in this file; at import time only the
    later definition is effective.

    :type plate: numpy.array
    :param plate: A gray image of the license plate
    :rtype: list[numpy.array]
    :return: BGR images of the contours
    """

    img = plate.copy()
    # The below copy is used only to visualize the process
    disp_img = cv2.cvtColor(plate, cv2.COLOR_GRAY2BGR)

    # plate must be single-channel: shape unpacks into exactly two values
    img_height, img_width = img.shape
    img_area = img_height * img_width

    if __debug__:
        print("\nSegmenting contours\nPart area: %.3f" % img_area)

    # Filter small noise points by filling them with black color
    contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for i, ct in enumerate(contours):
        x, y, box_width, box_height = cv2.boundingRect(ct)
        if box_width > 0 and box_height > 0:
            box_area = float(box_width) * float(box_height)
            # Normalize so that box_ratio is always the larger side over the
            # smaller side (>= 1).
            box_ratio = float(box_width) / float(box_height)
            if box_ratio < 1:
                box_ratio = 1 / float(box_ratio)

            limit_ratio = 5.5
            limit_area = 45.0
            # NOTE(review): the right operand (area ratio > limit_area) already
            # falsifies the third term of the negated conjunction, so this
            # condition appears equivalent to the area test alone — verify.
            if not (box_ratio < limit_ratio
                    and box_height / float(box_width) < limit_ratio
                    and img_area / box_area < limit_area
                    ) and float(img_area) / box_area > limit_area:
                # thickness=-1 fills the contour, erasing it from img
                cv2.drawContours(img, [ct], 0, (0, 0, 0), thickness=-1)
                cv2.drawContours(disp_img, [ct], 0, (0, 0, 0), thickness=-1)

    boxes = []
    # Find the contours satisfying the conditions i.e the license plate characters
    # RETR_TREE returns a hierarchy of the contours: parent and children
    contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for i, ct in enumerate(contours):

        # Skip the contour if it has a parent contour
        # (hierarchy rows are [next, previous, first_child, parent])
        if hierarchy[0][i][3] != -1:
            continue

        x, y, box_width, box_height = cv2.boundingRect(ct)
        if box_width > 0 and box_height > 0:
            box_area = float(box_width) * float(box_height)
            box_ratio = float(box_width) / float(box_height)
            if box_ratio < 1:
                box_ratio = 1 / float(box_ratio)

            # TODO: Square in the middle always caught, adjust box_ratio upper limit
            # TODO: Number 1 (one) has ratio ~ 4.5: very thin and high
            limit_ratio = 5.5
            limit_area = 45.0
            # Accept boxes with moderate aspect ratio whose area is between
            # 1/45 and 1/4 of the whole plate area.
            if box_ratio < limit_ratio and box_height / float(box_width) < limit_ratio \
                    and 4 < img_area / box_area < limit_area:
                if __debug__:
                    print("Box width: %.3f, height: %.3f" %
                          (box_width, box_height))
                    print("Box area: %.3f" % box_area)
                    print("Box ratio: %.3f" % box_ratio)
                    print("Area ratio: %.3f" % (img_area / box_area))
                    print("Passed\n")

                # Experimental: fill contour with color
                # cv2.drawContours(img, [ct], 0, (255, 255, 255), thickness=-1)

                # Draw a rectangle around the contour (for visualization only)
                cv2.rectangle(disp_img, (x, y),
                              (x + box_width, y + box_height), (0, 255, 0), 1)

                # Corner points in (tl, bl, tr, br) order
                box_points = np.array([(x, y), (x, y + box_height),
                                       (x + box_width, y),
                                       (x + box_width, y + box_height)])
                boxes.append(np.array(box_points))
            else:
                # Once again filter small noise points by filling them with black color
                # in case some were missed the first time
                cv2.rectangle(disp_img, (x, y),
                              (x + box_width, y + box_height), (0, 0, 255), 1)
                if img_area / box_area > limit_area:
                    cv2.drawContours(img, [ct], 0, (0, 0, 0), thickness=-1)
                    cv2.drawContours(disp_img, [ct],
                                     0, (0, 0, 0),
                                     thickness=-1)

    # EXPERIMENTAL
    # The idea is to first fill a contour with a solid color
    # and after that fill any child contour with black color

    # for i, ct in enumerate(contours):
    # if hierarchy[0][i][3] != -1:
    #         parent_idx = hierarchy[0][i][3]
    #         parent_contour = contours[parent_idx]
    #         parent_area = cv2.contourArea(parent_contour)
    #         child_area = cv2.contourArea(ct)
    #         if child_area > float(parent_area) / 9:
    #             # Approximate using a polygon
    #             peri = cv2.arcLength(ct, True)
    #             approx = cv2.approxPolyDP(ct, 0.020 * peri, True)
    #             if cv2.isContourConvex(approx):
    #                 cv2.drawContours(img, [approx], 0, (0, 0, 0), thickness=-1)

    # sort the arrays representing the boxes by their x-coordinate
    boxes_sorted = sorted(boxes, key=lambda item: (item[0][0], item[0][1]))
    boxes_sep = display.get_parts_of_image(img, boxes_sorted)
    if __debug__:
        display.show_image(disp_img, resize=False)

    return [cv2.cvtColor(box, cv2.COLOR_GRAY2BGR) for box in boxes_sep]
def segment_contours(plate):
    """
    Find the contours satisfying the required constraints.

    Two passes over the binary plate image: pass 1 erases small noise blobs
    (area ratio above ``limit_area``); pass 2 keeps top-level contours whose
    bounding box has a moderate aspect ratio and covers between 1/45 and 1/4
    of the plate area — i.e. the license plate characters.

    NOTE(review): an earlier, identical definition of this function exists in
    this file and is shadowed by this one.

    :type plate: numpy.array
    :param plate: A gray (single-channel) image of the license plate
    :rtype: list[numpy.array]
    :return: BGR images of the contours
    """
    # Character-candidate thresholds, shared by both passes.
    limit_ratio = 5.5
    limit_area = 45.0

    img = plate.copy()
    # This copy is used only to visualize the process.
    disp_img = cv2.cvtColor(plate, cv2.COLOR_GRAY2BGR)

    img_height, img_width = img.shape
    img_area = img_height * img_width

    if __debug__:
        print("\nSegmenting contours\nPart area: %.3f" % img_area)

    # Pass 1: filter small noise points by filling them with black color.
    contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for ct in contours:
        x, y, box_width, box_height = cv2.boundingRect(ct)
        if box_width > 0 and box_height > 0:
            box_area = float(box_width) * float(box_height)
            # Simplified from the original
            #   not (ratio-ok and h/w-ok and area-ok) and area-ratio > limit_area
            # which is logically equivalent: the area test on the right already
            # falsifies "area-ok", making the negated conjunction always true
            # whenever the right-hand test holds.
            if float(img_area) / box_area > limit_area:
                # thickness=-1 fills the contour, erasing it from img.
                cv2.drawContours(img, [ct], 0, (0, 0, 0), thickness=-1)
                cv2.drawContours(disp_img, [ct], 0, (0, 0, 0), thickness=-1)

    boxes = []
    # Pass 2: find the contours satisfying the conditions, i.e. the characters.
    # RETR_TREE returns a hierarchy of the contours: parent and children.
    contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for i, ct in enumerate(contours):
        # Skip the contour if it has a parent contour
        # (hierarchy rows are [next, previous, first_child, parent]).
        if hierarchy[0][i][3] != -1:
            continue

        x, y, box_width, box_height = cv2.boundingRect(ct)
        if box_width <= 0 or box_height <= 0:
            continue

        box_area = float(box_width) * float(box_height)
        # Normalize so box_ratio >= 1 (larger side over smaller side); this
        # makes the original's separate height/width check redundant, since
        # box_height / box_width <= box_ratio always holds after this.
        box_ratio = float(box_width) / float(box_height)
        if box_ratio < 1:
            box_ratio = 1 / float(box_ratio)

        # TODO: Square in the middle always caught, adjust box_ratio upper limit
        # TODO: Number 1 (one) has ratio ~ 4.5: very thin and high
        if box_ratio < limit_ratio and 4 < img_area / box_area < limit_area:
            if __debug__:
                print("Box width: %.3f, height: %.3f" % (box_width, box_height))
                print("Box area: %.3f" % box_area)
                print("Box ratio: %.3f" % box_ratio)
                print("Area ratio: %.3f" % (img_area / box_area))
                print("Passed\n")

            # Draw a rectangle around the contour (for visualization only).
            cv2.rectangle(disp_img, (x, y), (x + box_width, y + box_height), (0, 255, 0), 1)

            # Corner points in (tl, bl, tr, br) order; no second np.array
            # copy as in the original — the values are never mutated.
            boxes.append(np.array(
                [(x, y), (x, y + box_height), (x + box_width, y), (x + box_width, y + box_height)]
            ))
        else:
            # Once again filter small noise points by filling them with black
            # color, in case some were missed the first time.
            cv2.rectangle(disp_img, (x, y), (x + box_width, y + box_height), (0, 0, 255), 1)
            if img_area / box_area > limit_area:
                cv2.drawContours(img, [ct], 0, (0, 0, 0), thickness=-1)
                cv2.drawContours(disp_img, [ct], 0, (0, 0, 0), thickness=-1)

    # Sort the arrays representing the boxes by their x-coordinate.
    boxes_sorted = sorted(boxes, key=lambda item: (item[0][0], item[0][1]))
    boxes_sep = display.get_parts_of_image(img, boxes_sorted)
    if __debug__:
        display.show_image(disp_img, resize=False)

    return [cv2.cvtColor(box, cv2.COLOR_GRAY2BGR) for box in boxes_sep]