Example #1
    def filter_outliers_top(self, seg, y_range, threshold, verbose=False):
        """ Filters outliers that end up on top of the segments

        :param y_range: Range of pixels in y direction
        :param threshold: threshold for amount of diamonds in the same height
        to say if current diamond is a diamond or outlier
        :param verbose: shows removed outliers in self.rgb_img
        :return: No return
        """
        seg.diamonds.sort(key=attrgetter('y'))
        nb_removed = 0
        # only the first (topmost) diamonds are candidates for removal
        for i, dia in enumerate(seg.diamonds[:10]):
            in_range = 0
            for other_dia in seg.diamonds:
                y_dist = abs(dia.y - other_dia.y)
                if y_dist <= y_range:
                    in_range += 1
                if in_range > threshold:
                    break
            if in_range <= threshold:
                if verbose:
                    draw_circle(self.rgb_img, seg.diamonds[i - nb_removed].x,
                                seg.diamonds[i - nb_removed].y, 20,
                                consts.YELLOW)

                seg.diamonds.pop(i - nb_removed)
                nb_removed += 1
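The filter boils down to a neighbour count: a diamond is kept only when enough other diamonds sit at a similar height. A minimal standalone sketch of that idea, assuming diamonds are plain (x, y) tuples rather than the Blob objects used above (filter_outliers_1d is a hypothetical helper, not part of the pipeline):

def filter_outliers_1d(points, coord_range, threshold, axis=1):
    """Keep points with more than `threshold` neighbours (including
    themselves) whose coordinate along `axis` lies within `coord_range`."""
    kept = []
    for p in points:
        in_range = sum(1 for q in points
                       if abs(p[axis] - q[axis]) <= coord_range)
        if in_range > threshold:
            kept.append(p)
    return kept

# the lone point at y=5 is dropped, the cluster around y=100 is kept
points = [(10, 5), (12, 100), (15, 102), (11, 104), (14, 101)]
print(filter_outliers_1d(points, coord_range=25, threshold=3))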
Example #2
    def draw_detected_diamonds(self, segment):
        """Diamond drawer
        This method draws diamond positions for a specific segment on the rgb
        image. For debugging purposes
        :param segment: input segment you want to draw the diamonds for
        :return: No return
        """
        for blob in segment.diamonds:
            draw_circle(self.rgb_img, blob.x, blob.y, thickness=5)
Example #3
    def run_pipeline(self, file_path, show_io=False):
        """Run pipeline
        This method is used by the backend to invoke the cv pipeline and
        process one image taken by the Basler camera system.

        :param file_path: Path to the file in the filesystem
        :param show_io: For debugging purposes you can show the input and
        output
        :return: Returns segments for the evaluation using labeled data
        """

        # reset image, segments and variables
        self.reset_vars()

        # load new image
        self.set_img(file_path)
        if show_io:
            self.rgb_img = cv.cvtColor(self.gray_img, cv.COLOR_GRAY2BGR)
            self.show_img("Input Image", self.rgb_img, divisor=4)

        # 1. Multi-scale template matching
        self.set_dia_segs_multi_scale_tm(verbose=False)

        # 1.2 Filter segments for more robustness
        self.filter_segments(verbose=False)

        # 2. Sort segments
        # NOTE: This only works if 12 segments are detected.
        self.sort_segments(verbose=False)

        for seg in self.segments:
            # 3. segment processing
            self.process_segments_dbscan(seg, verbose=False)
            # 4. outliers top
            self.filter_outliers_top(seg,
                                     y_range=25,
                                     threshold=4,
                                     verbose=False)
            # 5. outliers left right
            self.filter_outliers_left_right(seg,
                                            x_range=30,
                                            threshold=1,
                                            verbose=False)

        # show detected diamonds
        if show_io:
            # draws detected diamonds on input image for frontend
            for seg in self.segments:
                # self.draw_detected_diamonds(seg)
                for diamond in seg.diamonds:
                    draw_circle(self.rgb_img, diamond.x, diamond.y, 5,
                                consts.GREEN)
            self.show_img("Final Output", self.rgb_img, divisor=4)

        return self.segments
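A sketch of how a backend might drive this method; Pipeline and the image path are placeholders, since the surrounding class and its constructor are not shown in these examples:

pipeline = Pipeline()  # hypothetical name for the surrounding class
segments = pipeline.run_pipeline("images/basler_0001.png", show_io=False)
for seg in segments:
    print("segment %i: %i diamonds detected" % (seg.id, len(seg.diamonds)))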
Example #4
    def run_pipeline(self, file_path, show_io=False, create_rgb_img=False):
        """Run pipeline
        This method is used by the backend to invoke the cv pipeline and
        process one image taken by the Basler camera system.

        :param file_path: Path to the file in the filesystem
        :param show_io: For debugging purposes you can show the input and
        output
        :return: Returns segments for the evaluation using labeled data
        """

        # reset image, segments and variables
        self.reset_vars()

        # load new image
        self.set_img(file_path)

        if create_rgb_img:
            self.rgb_img = cv.cvtColor(self.gray_img, cv.COLOR_GRAY2BGR)
        if show_io:
            show_img("Input Image", self.rgb_img, divisor=4)

        self.set_segs_fixed_bounding_boxes(use_polygons=True, verbose=True)

        for seg in self.segments:
            # 3. segment processing
            self.process_segments(seg, verbose=False)
            # # 4. outliers top
            # self.filter_outliers_top(seg, y_range=25, threshold=4,
            #                          verbose=False)
            # # 5. outliers left right
            # self.filter_outliers_left_right(seg, x_range=30, threshold=1,
            #                                 verbose=False)

        # show detected diamonds
        if show_io:
            # draws detected diamonds on input image for frontend
            for seg in self.segments:
                # self.draw_detected_diamonds(seg)
                for diamond in seg.diamonds:
                    draw_circle(self.rgb_img, diamond.x, diamond.y, 5,
                                consts.GREEN)
            show_img("Final Output", self.rgb_img, divisor=4)

        return self.segments
Example #5
    def sort_segments(self, verbose=False):
        """Segment sorter
        This method sorts the segment by a specific order starting with segment
        on the top left and ending bottom right.
        1   2   3
        4   5   6
        7   8   9
        10  11  12
        :param verbose: Activate debug windows to show what's happening here
        :return: No return
        """

        # check if correct number of segments were detected,
        # exits if number of segments != 12, since this is the basis for the
        # computations in this function
        self.assert_nmbr_segments()

        sorted_segs = sorted(self.segments, key=attrgetter('y'))
        temp_list = []
        for i in range(0, len(sorted_segs), 3):
            row_segs = [sorted_segs[i], sorted_segs[i + 1], sorted_segs[i + 2]]
            row_segs = sorted(row_segs, key=attrgetter('x'))
            row_segs[0].set_id(i + 1)
            row_segs[1].set_id(i + 2)
            row_segs[2].set_id(i + 3)
            # size_updater_for_contour_detection(row_segs, verbose=verbose)

            temp_list.append(row_segs)

        # update segments with correct order and sizes
        self.segments = [item for sublist in temp_list for item in sublist]

        if verbose:
            for seg in self.segments:
                draw_circle(self.rgb_img, seg.x, seg.y, thickness=50)
                cv.rectangle(self.rgb_img, (seg.x, seg.y),
                             (seg.x + seg.w, seg.y + seg.h), consts.YELLOW, 6)
                self.show_img("Sorting Segments", self.rgb_img, divisor=4)
                key = cv.waitKey()
                if "c" == chr(key & 255):
                    continue
                if "q" == chr(key & 255):
                    log.info("Breaking from segment counter")
                    break
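The ordering itself is simple: sort by y, split into rows of three, then sort each row by x. A small standalone sketch with plain (x, y) tuples (sort_grid is a hypothetical helper, not part of the pipeline):

from operator import itemgetter

def sort_grid(points, cols=3):
    """Return points in row-major order: top-left first, bottom-right last."""
    by_y = sorted(points, key=itemgetter(1))
    ordered = []
    for i in range(0, len(by_y), cols):
        ordered.extend(sorted(by_y[i:i + cols], key=itemgetter(0)))
    return ordered

# a scrambled 2x3 grid
pts = [(200, 10), (10, 12), (100, 11), (100, 90), (10, 92), (200, 91)]
print(sort_grid(pts))
# [(10, 12), (100, 11), (200, 10), (10, 92), (100, 90), (200, 91)]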
Example #6
    def filter_sides(self, seg, x_range, threshold, verbose):
        """Filters outliers along the x direction, analogous to
        filter_outliers_top but using x distances.

        :param seg: segment whose diamonds are filtered
        :param x_range: range of pixels in x direction
        :param threshold: a diamond is kept only if more than this many
        diamonds (including itself) lie within x_range of it
        :param verbose: shows removed outliers in self.rgb_img
        :return: No return
        """
        nb_removed = 0
        # only the first few diamonds are candidates for removal
        for i, dia in enumerate(seg.diamonds[:5]):
            in_range = 0
            for other_dia in seg.diamonds:
                x_dist = abs(dia.x - other_dia.x)
                if x_dist <= x_range:
                    in_range += 1
                if in_range > threshold:
                    break
            if in_range <= threshold:
                if verbose:
                    draw_circle(self.rgb_img, seg.diamonds[i - nb_removed].x,
                                seg.diamonds[i - nb_removed].y, 20,
                                consts.YELLOW)

                seg.diamonds.pop(i - nb_removed)
                nb_removed += 1
Example #7
    def compare_segments(self, detected_segs, nb, verbose=False):
        """
        This function compares the detected diamonds with the true position of
        the diamonds
        :param detected_segs: segments detected using cv-pipeline
        :param verbose on/off results in OpenCV GUI
        :return:
        """
        # distance threshold between true and detected diamonds in pixels
        dist_th = 20

        # 1. total error
        sum_true_diamonds = 0
        for seg in self.eval_segments:
            sum_true_diamonds += len(seg.diamonds)
        sum_detect_diamonds = 0
        for seg in detected_segs:
            sum_detect_diamonds += len(seg.diamonds)
        total_error = \
            np.abs(sum_true_diamonds - sum_detect_diamonds) / sum_true_diamonds
        log.info(" TOTAL: %i / %i [DETECT / TRUE]: %.1f%% error" %
                 (sum_detect_diamonds, sum_true_diamonds, total_error * 100))

        # 2. error per segment
        total_trefferquote = 0
        nb_total_trefferquote = 0
        total_fehlerrate = 0
        nb_total_fehlerrate = 0
        for true_seg, det_seg in zip(self.eval_segments, detected_segs):
            error_per_seg = \
                np.abs(len(true_seg.diamonds) - len(det_seg.diamonds)) / \
                len(true_seg.diamonds)

            # 3. compute correctly detected per segment
            correct = []
            incorrect = []
            matching_closest = -1
            for i, det_dia in enumerate(det_seg.diamonds):
                min_dist = np.inf
                curr_diamond = np.array([det_dia.x, det_dia.y])

                for j, tru_dia in enumerate(true_seg.diamonds):
                    dist = \
                        np.linalg.norm(curr_diamond - np.array([tru_dia.x,
                                                                tru_dia.y]))
                    if dist < min_dist:
                        min_dist = dist
                        matching_closest = j
                if verbose:
                    draw_circle(self.eval_img, det_dia.x, det_dia.y, 10,
                                consts.BLUE)

                if min_dist < dist_th:
                    correct.append(i)
                    if verbose:
                        draw_circle(self.test_img,
                                    true_seg.diamonds[matching_closest].x,
                                    true_seg.diamonds[matching_closest].y, 10,
                                    consts.GREEN)
                else:
                    incorrect.append(i)
                    if verbose:
                        draw_circle(self.eval_img, det_dia.x, det_dia.y, 4,
                                    consts.YELLOW)

            accuracy = len(correct) / len(true_seg.diamonds)
            total_trefferquote += accuracy
            nb_total_trefferquote += len(correct)

            if verbose:
                for corr in correct:
                    draw_circle(self.eval_img, det_seg.diamonds[corr].x,
                                det_seg.diamonds[corr].y, 4, consts.GREEN)

            # 4. detect falsely detected DIA
            error_falsely = \
                (len(det_seg.diamonds) - len(correct)) / len(true_seg.diamonds)

            # 5. not detected diamonds
            not_found = []
            for i, tru_dia in enumerate(true_seg.diamonds):
                min_dist = np.inf
                curr_diamond = np.array([tru_dia.x, tru_dia.y])
                for j, det_dia in enumerate(det_seg.diamonds):
                    dist = \
                        np.linalg.norm(curr_diamond - np.array([det_dia.x,
                                                                det_dia.y]))
                    if dist < min_dist:
                        min_dist = dist
                if min_dist > dist_th:
                    not_found.append(i)

            error_not_found = len(not_found) / len(true_seg.diamonds)

            if verbose:
                for index_nf in not_found:
                    draw_circle(self.eval_img, true_seg.diamonds[index_nf].x,
                                true_seg.diamonds[index_nf].y, 4,
                                consts.YELLOW)
                    self.show_img("eval with diamonds",
                                  self.eval_img,
                                  divisor=4)

            nb_fehlerrate = \
                (len(det_seg.diamonds) - len(correct)) + len(not_found)
            nb_total_fehlerrate += nb_fehlerrate
            error_fehlerrate = nb_fehlerrate / len(true_seg.diamonds)
            total_fehlerrate += error_fehlerrate

            # log eval per segment
            log.debug("\tSEG %2i, %i / %i [DETECT / TRUE]: %.1f%% error" %
                      (true_seg.id, len(det_seg.diamonds),
                       len(true_seg.diamonds), error_per_seg * 100))
            log.debug("\t--> TREFFERQUOTE %i / %i [CORRECT / TRUE]: %.1f%% "
                      "accuracy" %
                      (len(correct), len(true_seg.diamonds), accuracy * 100))
            log.debug("\t--> FEHLERRATE %i / %i [FR / TRUE]: %.1f%% "
                      "error" % (nb_fehlerrate, len(
                          true_seg.diamonds), error_fehlerrate * 100))
            log.debug("\t   --> FALSELY %i / %i [FALSE / TRUE]: %.1f%% "
                      "error" % (len(det_seg.diamonds) - len(correct),
                                 len(true_seg.diamonds), error_falsely * 100))
            log.debug("\t   --> NOT FOUND %i / %i [NOT FOUND / TRUE]: %.1f%% "
                      "error\n" % (len(not_found), len(
                          true_seg.diamonds), error_not_found * 100))
        # log total
        log.info(" TOTAL TREFFERQUOTE (HIT RATIO) %i / %i [HR / TRUE]: %.1f%% "
                 "accuracy" % (nb_total_trefferquote, sum_true_diamonds,
                               (total_trefferquote / 12) * 100))
        log.info(" TOTAL FEHLERRATE (ERROR RATE) %i / %i [FR / TRUE]: %.1f%% "
                 "error" % (nb_total_fehlerrate, sum_true_diamonds,
                            (total_fehlerrate / 12) * 100))

        return total_trefferquote, total_fehlerrate
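Per segment, the evaluation reduces to nearest-neighbour matching with a pixel threshold (20 px above). A compact sketch of that computation on plain coordinate lists, independent of the Segment and Blob classes (evaluate_segment is a hypothetical helper):

import numpy as np

def evaluate_segment(detected, true, dist_th=20):
    """Hit ratio (Trefferquote) and error rate (Fehlerrate) for one segment."""
    detected = np.asarray(detected, dtype=float)
    true = np.asarray(true, dtype=float)

    # pairwise distances: d2t[i, j] = distance(detected[i], true[j])
    d2t = np.linalg.norm(detected[:, None] - true[None, :], axis=2)

    # detections whose nearest true diamond is within dist_th pixels
    correct = int(np.sum(d2t.min(axis=1) < dist_th))
    # true diamonds with no detection within dist_th pixels
    not_found = int(np.sum(d2t.min(axis=0) > dist_th))

    hit_ratio = correct / len(true)
    error_rate = ((len(detected) - correct) + not_found) / len(true)
    return hit_ratio, error_rate

# two of three detections match, one true diamond is missed:
# both ratios come out to 2/3 here
print(evaluate_segment([(10, 10), (52, 48), (300, 300)],
                       [(12, 11), (50, 50), (100, 100)]))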
Example #8
    def process_segments(self,
                         seg,
                         inner_seg=None,
                         verbose=False,
                         verbose_seg=False):
        """Process detected segments
        This method processes the detected segments using morphological
        operations, multiotsu thresholding and DBSCAN.

        :param seg: the segment that is being processed
        :param verbose: Activate debug windows to show what's happening here
        :return: No return
        """
        log.debug("SEG ID = %i" % seg.id)
        self.gray_seg = self.gray_img[seg.y:seg.y + seg.h, seg.x:seg.x + seg.w]

        # f_gray_seg = self.gray_seg.flatten()
        # plt.hist(f_gray_seg, bins =list(range(np.amin(f_gray_seg),np.amax(f_gray_seg),2 )))
        # plt.title("histogram")
        # plt.savefig("hist.png")
        print("\n \n MIN ", np.amin(self.gray_seg), " MAX ",
              np.amax(self.gray_seg))
        if self.use_polygons:
            # remove reflections: clamp bright pixels to the darkest value in
            # the segment, or to 1 if the darkest value is 0
            min_val_pixel = np.min(self.gray_seg)
            fill_val = min_val_pixel if min_val_pixel > 0 else 1
            seg_h = self.gray_seg.shape[0]

            # whole segment
            self.gray_seg[self.gray_seg >= 120] = fill_val

            # upper half uses a lower clamping threshold
            upper_gray = self.gray_seg[:round(seg_h / 2), :]
            upper_gray[upper_gray >= 80] = fill_val
            self.gray_seg[:round(seg_h / 2), :] = upper_gray

            # band between 1/2 and 2/3 of the segment height
            gray_23 = self.gray_seg[round(seg_h / 2):round(seg_h / 3 * 2), :]
            gray_23[gray_23 >= 100] = fill_val
            self.gray_seg[round(seg_h / 2):round(seg_h / 3 * 2), :] = gray_23

            #show_img("00 gray_seg remove white pixel", self.gray_seg, 1)
            # inner_seg.pts -= inner_seg.pts.min(axis=0)
            # mask = np.zeros(self.gray_seg.shape[:2], dtype=np.uint8)
            # show_img("mask", mask, 1)
            # cv.drawContours(mask, [inner_seg.pts], -1, (255, 255, 255), -1,
            #                 cv.LINE_AA)
            # self.gray_seg[self.gray_seg >= 250] = 1
            # self.gray_seg = cv.bitwise_and(self.gray_seg, self.gray_seg,
            #                                mask=mask)
            # show_img("0 gray_seg", self.gray_seg, 1)
            # # self.gray_seg[self.gray_seg == 0] = 255

            # bg = np.ones_like(self.gray_seg, dtype=np.uint8) * 255
            # cv.bitwise_not(bg, bg, mask=mask)
            # self.gray_seg = cv.bitwise_xor(self.gray_seg, bg)
            # show_img("1 gray_seg", self.gray_seg, 1)

            # show_img("bg", bg, 1)
            #show_img("1 OUTER gray_seg", self.gray_seg, 1)
            # self.gray_seg += bg

            #
        if verbose:
            self.rgb_seg = cv.cvtColor(self.gray_seg, cv.COLOR_GRAY2BGR)
            show_img("0: Input segment", self.gray_seg, divisor=1)

        # remove reflections
        # # ToDo moved this out of here
        # min_val_pixel = np.min(self.gray_seg)
        # # self.gray_seg[self.gray_seg == 255] = min_val_pixel
        # self.gray_seg[self.gray_seg == 255] = min_val_pixel
        # if verbose:
        #     show_img("1: Removed reflections", self.gray_seg, divisor=1)

        # median blur; the topmost rows get separate treatment
        processed_seg = cv.medianBlur(self.gray_seg, 11)

        # rows 0-11: strong median blur
        upper_upper_blur = cv.medianBlur(processed_seg[:12, :], 25)
        processed_seg[:12, :] = upper_upper_blur

        # rows 12-29: light median blur
        upper_blur = cv.medianBlur(processed_seg[12:30, :], 3)
        processed_seg[12:30, :] = upper_blur

        # cv.rectangle(processed_seg, (0,12),
        #                      (processed_seg.shape[1] , 60),
        #                     consts.RED, thickness=1)
        if verbose:
            show_img("2: Blurred segment", processed_seg, divisor=1)

        # # remove "salt" noise
        # opening_kernel = cv.getStructuringElement(
        #     cv.MORPH_ELLIPSE,
        #     (self.seg_opening_kern_size, self.seg_opening_kern_size))
        # processed_seg = cv.morphologyEx(self.gray_seg, cv.MORPH_OPEN,
        #                                 opening_kernel,
        #                                 iterations=3)
        # if verbose:
        #     show_img("3: Opening after input", processed_seg,
        #                   divisor=1)

        # denoise
        cv.fastNlMeansDenoising(processed_seg,
                                processed_seg,
                                h=5,
                                templateWindowSize=7,
                                searchWindowSize=21)

        # test_seg2 = cv.fastNlMeansDenoising(processed_seg, None, h=10,
        #                                         templateWindowSize=5,
        #                                         searchWindowSize=21)
        if verbose:
            show_img("3: Denoised image", processed_seg, divisor=1)

            # show_img("3.1: Denoised image high h", test_seg2, divisor=1)

        # multi otsu thresholding
        thresholds = threshold_multiotsu(processed_seg, classes=5)

        log.debug("multi-Otsu thresholds: %s" % str(thresholds))
        # map the 5 intensity classes to fixed gray levels
        # (class 0 stays black, the brightest classes become white)
        processed_seg = np.uint8(np.digitize(processed_seg, bins=thresholds))
        processed_seg[processed_seg == 1] = 49
        processed_seg[processed_seg == 2] = 99
        processed_seg[processed_seg == 3] = 255
        processed_seg[processed_seg == 4] = 255

        processed_seg = adjust_right(processed_seg, seg, False)
        processed_seg = adjust_left(processed_seg, seg, False)
        processed_seg = adjust_bottom(processed_seg, seg, False)
        if verbose:
            show_img("5: Multi Otsu", processed_seg, divisor=1)

        # # dilate with radial kernel
        # radial_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
        # # processed_seg = cv.dilate(processed_seg, radial_kernel,
        # #                           iterations=1)
        # processed_seg = cv.morphologyEx(processed_seg, cv.MORPH_OPEN,
        #                                 radial_kernel, iterations=3)
        # if verbose:
        #     show_img("6: Openend output", processed_seg, divisor=1)
        #
        # closing_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
        # processed_seg = cv.morphologyEx(processed_seg, cv.MORPH_CLOSE,
        #                                 closing_kernel, iterations=4)
        # if verbose:
        #     show_img("7: closing after otsu", processed_seg, divisor=1)
        if verbose:
            ths = [
                i for i in range(self.bd_minThreshold, self.bd_maxThreshold,
                                 self.bd_thresholdStep)
            ]
            for th in ths:
                _, img = cv.threshold(processed_seg, th, 255, cv.THRESH_BINARY)
                show_img("threshold for %s" % str(th), img, divisor=1)

        # # create border for diamonds close to segment edge
        border = self.border  # in pixels
        if border:
            processed_seg = cv.copyMakeBorder(processed_seg,
                                              border,
                                              border,
                                              border,
                                              border,
                                              borderType=cv.BORDER_CONSTANT,
                                              value=255)
            if verbose:
                show_img("7: With border", processed_seg, divisor=1)

        # Blob detection
        blob_keypts = self.blob_detector_seg.detect(processed_seg)
        log.debug(" detected %i blobs" % len(blob_keypts))

        for keypt in blob_keypts:
            seg.diamonds.append(
                Blob(int(keypt.pt[0] - border + seg.x),
                     int(keypt.pt[1] - border + seg.y), seg.id))

        if verbose_seg:

            print("SEG INFO / ", seg.id, seg.x, seg.y, seg.left_border,
                  seg.right_border)
            cv.rectangle(self.rgb_img, (seg.x + seg.left_border, seg.y),
                         (seg.x + seg.w - seg.right_border, seg.y + seg.h),
                         consts.BLUE,
                         thickness=1)

        if verbose:
            for diamond in seg.diamonds:
                draw_circle(self.rgb_img, diamond.x, diamond.y, 5,
                            consts.GREEN)

            show_img("Current detection", self.rgb_img, divisor=5)
            cv.waitKey()
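The central quantisation step, multi-Otsu thresholds plus np.digitize remapped to a few fixed gray levels, can be isolated into a small helper before blob detection. A sketch assuming a grayscale uint8 crop; quantize_segment, the file name and the default SimpleBlobDetector are placeholders, since the pipeline configures its own blob_detector_seg elsewhere:

import cv2 as cv
import numpy as np
from skimage.filters import threshold_multiotsu

def quantize_segment(seg_img, classes=5):
    """Quantise a grayscale segment into a few fixed gray levels."""
    thresholds = threshold_multiotsu(seg_img, classes=classes)
    labels = np.digitize(seg_img, bins=thresholds).astype(np.uint8)
    out = np.full_like(labels, 255)  # brightest classes become white
    out[labels == 0] = 0
    out[labels == 1] = 49
    out[labels == 2] = 99
    return out

seg_img = cv.imread("segment.png", cv.IMREAD_GRAYSCALE)  # hypothetical file
if seg_img is not None:
    quantized = quantize_segment(seg_img)
    detector = cv.SimpleBlobDetector_create()  # default parameters
    keypoints = detector.detect(quantized)
    print("%i blobs detected" % len(keypoints))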
Example #9
    def process_segments(self, seg, verbose=False):
        """Process detected segments
        This method processes the detected segments using morphological
        operations, multiotsu thresholding and DBSCAN.

        :param seg: the segment that is being processed
        :param verbose: Activate debug windows to show what's happening here
        :return: No return
        """
        log.debug("SEG ID = %i" % seg.id)
        self.gray_seg = self.gray_img[seg.y:seg.y + seg.h, seg.x:seg.x + seg.w]
        if self.use_polygons:
            # remove reflections
            min_val_pixel = np.min(self.gray_seg)
            # self.gray_seg[self.gray_seg == 255] = min_val_pixel
            if min_val_pixel > 0:
                self.gray_seg[self.gray_seg >= 250] = min_val_pixel
            else:
                self.gray_seg[self.gray_seg >= 250] = 1

            seg.pts -= seg.pts.min(axis=0)
            mask = np.zeros(self.gray_seg.shape[:2], dtype=np.uint8)
            show_img("mask", mask, 1)
            cv.drawContours(mask, [seg.pts], -1, (255, 255, 255), -1,
                            cv.LINE_AA)
            self.gray_seg = cv.bitwise_and(self.gray_seg,
                                           self.gray_seg,
                                           mask=mask)
            show_img("gray_seg", self.gray_seg, 1)
            # self.gray_seg[self.gray_seg == 0] = 255

            bg = np.ones_like(self.gray_seg, dtype=np.uint8) * 255
            cv.bitwise_not(bg, bg, mask=mask)
            self.gray_seg = cv.bitwise_xor(self.gray_seg, bg)
            show_img("gray_seg2", self.gray_seg, 1)

            show_img("bg", bg, 1)
            # self.gray_seg += bg

        if verbose:
            self.rgb_seg = cv.cvtColor(self.gray_seg, cv.COLOR_GRAY2BGR)
            show_img("0: Input segment", self.gray_seg, divisor=1)

        # remove reflections
        # # ToDo moved this out of here
        # min_val_pixel = np.min(self.gray_seg)
        # # self.gray_seg[self.gray_seg == 255] = min_val_pixel
        # self.gray_seg[self.gray_seg == 255] = min_val_pixel
        # if verbose:
        #     show_img("1: Removed reflections", self.gray_seg, divisor=1)

        # median blur
        # processed_seg = cv.medianBlur(self.gray_seg, 11)
        processed_seg = cv.medianBlur(self.gray_seg, 11)
        # processed_seg = cv.GaussianBlur(self.gray_seg, (11,11), 5)
        if verbose:
            show_img("2: Blurred segment", processed_seg, divisor=1)

        # # remove "salt" noise
        # opening_kernel = cv.getStructuringElement(
        #     cv.MORPH_ELLIPSE,
        #     (self.seg_opening_kern_size, self.seg_opening_kern_size))
        # processed_seg = cv.morphologyEx(self.gray_seg, cv.MORPH_OPEN,
        #                                 opening_kernel,
        #                                 iterations=3)
        # if verbose:
        #     show_img("3: Opening after input", processed_seg,
        #                   divisor=1)

        # denoise
        cv.fastNlMeansDenoising(processed_seg,
                                processed_seg,
                                h=5,
                                templateWindowSize=7,
                                searchWindowSize=21)

        # test_seg2 = cv.fastNlMeansDenoising(processed_seg, None, h=10,
        #                                         templateWindowSize=5,
        #                                         searchWindowSize=21)
        if verbose:
            show_img("3: Denoised image", processed_seg, divisor=1)
            # show_img("3.1: Denoised image high h", test_seg2, divisor=1)

        # multi otsu thresholding
        thresholds = skimage.filters.threshold_multiotsu(processed_seg,
                                                         classes=5)
        # thresholds[0] -= 3
        # map the 5 intensity classes to fixed gray levels
        # (class 0 stays black, the brightest classes become white)
        processed_seg = np.uint8(np.digitize(processed_seg, bins=thresholds))
        processed_seg[processed_seg == 1] = 49
        processed_seg[processed_seg == 2] = 99
        processed_seg[processed_seg == 3] = 255
        processed_seg[processed_seg == 4] = 255
        if verbose:
            show_img("5: Multi Otsu", processed_seg, divisor=1)

        # # dilate with radial kernel
        # radial_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
        # # processed_seg = cv.dilate(processed_seg, radial_kernel,
        # #                           iterations=1)
        # processed_seg = cv.morphologyEx(processed_seg, cv.MORPH_OPEN,
        #                                 radial_kernel, iterations=3)
        # if verbose:
        #     show_img("6: Openend output", processed_seg, divisor=1)
        #
        # closing_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
        # processed_seg = cv.morphologyEx(processed_seg, cv.MORPH_CLOSE,
        #                                 closing_kernel, iterations=4)
        # if verbose:
        #     show_img("7: closing after otsu", processed_seg, divisor=1)
        if verbose:
            ths = [
                i for i in range(self.bd_minThreshold, self.bd_maxThreshold,
                                 self.bd_thresholdStep)
            ]
            for th in ths:
                _, img = cv.threshold(processed_seg, th, 255, cv.THRESH_BINARY)
                show_img("threshold for %s" % str(th), img, divisor=1)

        # # create border for diamonds close to segment edge
        border = self.border  # in pixels
        if border:
            processed_seg = cv.copyMakeBorder(processed_seg,
                                              border,
                                              border,
                                              border,
                                              border,
                                              borderType=cv.BORDER_CONSTANT,
                                              value=255)
            if verbose:
                show_img("7: With border", processed_seg, divisor=1)

        # Blob detection
        blob_keypts = self.blob_detector_seg.detect(processed_seg)
        log.debug(" detected %i blobs" % len(blob_keypts))

        for keypt in blob_keypts:
            seg.diamonds.append(
                Blob(int(keypt.pt[0] - border + seg.x),
                     int(keypt.pt[1] - border + seg.y), seg.id))

        if verbose:
            for diamond in seg.diamonds:
                draw_circle(self.rgb_img, diamond.x, diamond.y, 5,
                            consts.GREEN)
            show_img("Current detection", self.rgb_img, divisor=5)
            cv.waitKey()
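The polygon-masking branch above (drawContours mask, bitwise_and, then XOR against a white background) can be reduced to a small helper. A sketch assuming pts is an (N, 2) int32 array of polygon vertices in segment coordinates (mask_to_white_background is a hypothetical name):

import cv2 as cv
import numpy as np

def mask_to_white_background(gray_seg, pts):
    """Keep pixels inside the polygon; everything outside becomes white."""
    mask = np.zeros(gray_seg.shape[:2], dtype=np.uint8)
    cv.drawContours(mask, [pts], -1, 255, -1, cv.LINE_AA)
    inside = cv.bitwise_and(gray_seg, gray_seg, mask=mask)
    bg = np.ones_like(gray_seg, dtype=np.uint8) * 255
    cv.bitwise_not(bg, bg, mask=mask)   # black inside the polygon, white outside
    return cv.bitwise_xor(inside, bg)   # original values inside, white outside

# usage sketch with a synthetic gradient and a triangular polygon
gray = np.tile(np.arange(100, dtype=np.uint8), (100, 1))
polygon = np.array([[10, 10], [90, 10], [50, 90]], dtype=np.int32)
masked = mask_to_white_background(gray, polygon)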
Example #10
    def process_segments_dbscan(self, seg, verbose=False):
        """Process detected segments
        This method processes the detected segments using morphological
        operations, multiotsu thresholding and DBSCAN.

        :param seg: the segment that is being processed
        :param verbose: Activate debug windows to show what's happening here
        :return: No return
        """
        log.debug("SEG ID = %i" % seg.id)
        self.gray_seg = self.gray_img[seg.y:seg.y + seg.h, seg.x:seg.x + seg.w]

        if verbose:
            self.rgb_seg = cv.cvtColor(self.gray_seg, cv.COLOR_GRAY2BGR)
            self.show_img("0: Input segment", self.gray_seg, divisor=6)

        # remove reflections
        min_val_pixel = np.min(self.gray_seg)
        self.gray_seg[self.gray_seg == 255] = min_val_pixel
        if verbose:
            self.show_img("1: Removed reflections", self.gray_seg, divisor=6)

        # median blur
        processed_seg = cv.medianBlur(self.gray_seg, self.seg_median_blur)
        if verbose:
            self.show_img("2: Blurred segment", processed_seg, divisor=6)

        # remove "salt" noise
        opening_kernel = cv.getStructuringElement(
            cv.MORPH_ELLIPSE,
            (self.seg_opening_kern_size, self.seg_opening_kern_size))
        processed_seg = cv.morphologyEx(processed_seg,
                                        cv.MORPH_OPEN,
                                        opening_kernel,
                                        iterations=self.seg_opening_iters)
        if verbose:
            self.show_img("3: Opening after input", processed_seg, divisor=6)

        # multi otsu thresholding
        thresholds = threshold_multiotsu(processed_seg,
                                         classes=self.nb_otsu_classes)
        processed_seg = np.uint8(np.digitize(processed_seg, bins=thresholds))
        processed_seg[processed_seg == 0] = 255
        processed_seg[processed_seg == 1] = 150
        processed_seg[processed_seg == 2] = 100
        processed_seg[processed_seg == 3] = 50
        processed_seg[processed_seg == 4] = 0
        processed_seg[processed_seg == 5] = 0
        if verbose:
            self.show_img("5: Multi Otsu", processed_seg, divisor=6)

        # dilate with radial kernel
        radial_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
        # processed_seg = cv.dilate(processed_seg, radial_kernel,
        #                           iterations=1)
        processed_seg = cv.morphologyEx(processed_seg,
                                        cv.MORPH_OPEN,
                                        radial_kernel,
                                        iterations=3)
        if verbose:
            self.show_img("6: Openend output", processed_seg, divisor=6)

        closing_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
        processed_seg = cv.morphologyEx(processed_seg,
                                        cv.MORPH_CLOSE,
                                        closing_kernel,
                                        iterations=4)
        if verbose:
            self.show_img("7: closing after otsu", processed_seg, divisor=6)

        # Blob detection
        blob_keypts = self.blob_detector_seg.detect(processed_seg)

        # DBSCAN
        keypt_list = []
        for keypt in blob_keypts:
            # print(keypt.size)
            keypt_list.append([keypt.pt[0], keypt.pt[1]])
        dbscan_input = np.array(keypt_list)
        do_dbscan = True
        log.debug(" detected %i blobs" % len(blob_keypts))
        detected_blobs = len(blob_keypts)
        if detected_blobs < 10:
            do_dbscan = False
            log.error("Less than 10 diamonds detected. Is the input "
                      "correct?")
        elif detected_blobs > 100:
            dbscan_eps = 65
        elif detected_blobs > 55:
            dbscan_eps = 75
        else:
            dbscan_eps = 85

        if do_dbscan:
            clustering = DBSCAN(eps=dbscan_eps,
                                min_samples=3).fit(dbscan_input)
            counted_vals = Counter(clustering.labels_)
            log.debug("DBSCAN counted_vals = %s" % str(counted_vals))
            most_frequent = counted_vals.most_common(1)[0]

            for i, keypt_label in enumerate(clustering.labels_):
                if keypt_label == most_frequent[0]:
                    seg.diamonds.append(
                        Blob(int(blob_keypts[i].pt[0] + seg.x),
                             int(blob_keypts[i].pt[1] + seg.y), seg.id))

                if verbose:
                    if keypt_label != most_frequent[0]:
                        draw_circle(self.rgb_img,
                                    int(blob_keypts[i].pt[0] + seg.x),
                                    int(blob_keypts[i].pt[1] + seg.y), 10,
                                    consts.RED)
                    else:
                        draw_circle(self.rgb_img,
                                    int(blob_keypts[i].pt[0] + seg.x),
                                    int(blob_keypts[i].pt[1] + seg.y), 10,
                                    consts.GREEN)
        else:
            for keypt in blob_keypts:
                seg.diamonds.append(
                    Blob(int(keypt.pt[0] + seg.x), int(keypt.pt[1] + seg.y),
                         seg.id))

        if verbose:
            self.show_img("Current detection", self.rgb_img, divisor=5)
            cv.waitKey()
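The DBSCAN step keeps only the largest cluster of blob centres and discards everything else as outliers. A minimal sketch of that filter on plain (x, y) coordinates (keep_dominant_cluster is a hypothetical helper; eps and min_samples mirror the values used above):

import numpy as np
from collections import Counter
from sklearn.cluster import DBSCAN

def keep_dominant_cluster(points, eps=85, min_samples=3):
    """Return only the points belonging to the largest DBSCAN cluster."""
    pts = np.asarray(points, dtype=float)
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit(pts).labels_
    dominant_label, _ = Counter(labels).most_common(1)[0]
    return pts[labels == dominant_label]

# a tight 5x5 grid plus two far-away outliers
grid = [(x, y) for x in range(0, 200, 40) for y in range(0, 200, 40)]
outliers = [(900, 900), (950, 950)]
kept = keep_dominant_cluster(grid + outliers)
print(len(kept))  # 25 grid points kept, the two outliers dropped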