Example #1
    def filter_duplicate_candidates(self, data, image):
        """Cluster overlapping detections via a Gaussian heat map and keep only
        the best-scoring detection per cluster. Relies on module-level imports
        of cv2, numpy, and pandas."""

        # tuning parameters for box shrinking and candidate filtering
        Params.box_size_factor = 0.5
        Params.min_box_size = 5
        Params.ellipsoid_thresh = 0.5
        Params.min_k = 0

        # TODO time optimization: split into initial clusters using Gaussian information rather than heat-map contours
        heat_map = numpy.zeros(shape=[image.shape[0], image.shape[1], 1], dtype=numpy.float64)
        # shrink the detected boxes, then accumulate a Gaussian heat map weighted by IoU score
        original_detection_centers = self.shrink_boxes(data, heat_map)

        # scale to 0..255 and convert to uint8 so threshold/findContours can operate on it
        cv2.normalize(heat_map, heat_map, 0, 255, cv2.NORM_MINMAX)
        heat_map = cv2.convertScaleAbs(heat_map)
        # zero out weak responses so only dense detection clusters remain
        _, heat_map = cv2.threshold(heat_map, 4, 255, cv2.THRESH_TOZERO)
        # extract the outer contours of the heat map
        # (OpenCV 4 returns (contours, hierarchy); OpenCV 3 prepends the image)
        contours, _ = cv2.findContours(heat_map.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        
        candidates = self.find_new_candidates(contours, heat_map, data, original_detection_centers, image)
        candidates = self.map_original_boxes_to_new_boxes(candidates, original_detection_centers)

        # TODO time optimization: parallelize contours/clusters resolvers.
        # TODO time optimization: convert numpy to tensorflow/keras
        best_detection_ids = {}
        filtered_data = pandas.DataFrame(columns=data.columns)
        for i, candidate in candidates.items():
            label = candidate['original_detection_ids']
            # .ix was removed from pandas; .loc does the label-based lookup
            original_detections = data.loc[label]
            # rank by the average of the soft and hard scores, keep the best
            original_detections['avg_score'] = (
                0.5 * original_detections.confidence + 0.5 * original_detections.hard_score)
            best_detection_id = original_detections.avg_score.idxmax()
            # best_detection_id = original_detections.confidence.idxmax()
            # best_detection_id = original_detections.hard_score.idxmax()
            best_detection = original_detections.loc[best_detection_id].copy()

            # The following code creates the median bboxes
            # original_detections = original_detections[original_detections.confidence > 0.5]
            # if original_detections.shape[0] > 0:
            #     w = original_detections['x2'] - original_detections['x1']
            #     h = original_detections['y2'] - original_detections['y1']
            #     x = original_detections['x1'] + 0.5 * w
            #     y = original_detections['y1'] + 0.5 * h
            #
            #     med_x = int(round(numpy.percentile(x, 50)))
            #     med_y = int(round(numpy.percentile(y, 50)))
            #     med_w = int(round(numpy.percentile(w, 50)))
            #     med_h = int(round(numpy.percentile(h, 50)))
            #     best_detection['x1'] = med_x - med_w / 2
            #     best_detection['y1'] = med_y - med_h / 2
            #     best_detection['x2'] = med_x + med_w / 2
            #     best_detection['y2'] = med_y + med_h / 2

            best_detection_ids[best_detection_id] = best_detection
            # DataFrame.append was removed in pandas 2.0; concat the row instead
            filtered_data = pandas.concat([filtered_data, best_detection.to_frame().T])

        # to handle overlap between contour bboxes
        filtered_data = perform_nms_on_image_dataframe(filtered_data, 0.3)

        return filtered_data
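
The heat map above is filled by shrink_boxes, which is not part of this listing. Below is a minimal sketch of the accumulation step it presumably performs, assuming each detection is shrunk by Params.box_size_factor around its center and stamped as a score-weighted 2D Gaussian; the helper name, sigma choice, and weighting are assumptions, not the original implementation.

import numpy

def stamp_box_gaussian(heat_map, box, score, box_size_factor=0.5):
    # Hypothetical helper: stamp one (x1, y1, x2, y2) detection onto heat_map
    # as a 2D Gaussian weighted by its score; sigma is derived from the shrunk box.
    x1, y1, x2, y2 = box
    cx, cy = 0.5 * (x1 + x2), 0.5 * (y1 + y2)
    sigma_x = max(1.0, 0.5 * box_size_factor * (x2 - x1))
    sigma_y = max(1.0, 0.5 * box_size_factor * (y2 - y1))
    ys, xs = numpy.mgrid[0:heat_map.shape[0], 0:heat_map.shape[1]]
    gaussian = numpy.exp(-((xs - cx) ** 2 / (2 * sigma_x ** 2)
                           + (ys - cy) ** 2 / (2 * sigma_y ** 2)))
    heat_map[..., 0] += score * gaussian

heat_map = numpy.zeros((480, 640, 1), dtype=numpy.float64)
stamp_box_gaussian(heat_map, box=(100, 120, 180, 220), score=0.9)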
Example #2
    def perform_nms(self, candidates, contour_i, curr_data):
        """Run NMS on one contour's detections and append the surviving boxes
        that exceed the minimum size to the candidate list."""

        nms_data = perform_nms_on_image_dataframe(curr_data, 0.3)

        for sub_ind, row in nms_data.iterrows():
            curr_box = numpy.asarray([row['x1'], row['y1'], row['x2'], row['y2']])
            box_width = curr_box[BOX.X2] - curr_box[BOX.X1]
            box_height = curr_box[BOX.Y2] - curr_box[BOX.Y1]
            # keep only boxes larger than the minimum size as new candidates
            if box_width > Params.min_box_size and box_height > Params.min_box_size:
                candidates.append({'box': curr_box, 'original_detection_ids': []})
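
Both examples delegate overlap resolution to perform_nms_on_image_dataframe, which is defined elsewhere in the project. Below is a minimal sketch of what a greedy IoU-based NMS over such a DataFrame could look like, assuming columns x1, y1, x2, y2 and confidence; the column names and tie-breaking order are assumptions.

import pandas

def iou(box_a, box_b):
    # intersection-over-union of two (x1, y1, x2, y2) boxes
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def perform_nms_on_image_dataframe(data, iou_thresh):
    # greedy NMS: keep the highest-confidence box of each overlapping group
    kept_rows, kept_boxes = [], []
    for _, row in data.sort_values('confidence', ascending=False).iterrows():
        box = (row['x1'], row['y1'], row['x2'], row['y2'])
        if all(iou(box, kept) <= iou_thresh for kept in kept_boxes):
            kept_rows.append(row)
            kept_boxes.append(box)
    return pandas.DataFrame(kept_rows)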