Example #1
import os

import pandas

# read_image_bgr, root_dir and DuplicateMerger are assumed to come from the
# project's detection utilities; the exact import paths depend on the repo layout.


def merge_detections(image_name, results):
    # Pack the raw detection array (x1, y1, x2, y2, confidence, hard_score)
    # into a DataFrame, one row per detection.
    result_df = pandas.DataFrame()
    result_df['x1'] = results[:, 0].astype(int)
    result_df['y1'] = results[:, 1].astype(int)
    result_df['x2'] = results[:, 2].astype(int)
    result_df['y2'] = results[:, 3].astype(int)
    result_df['confidence'] = results[:, 4]
    result_df['hard_score'] = results[:, 5]
    result_df['uuid'] = 'object_label'
    result_df['label_type'] = 'object_label'
    result_df['image_name'] = image_name

    # reset_index() returns a new DataFrame, so assign it back before using
    # the index as an id column.
    result_df = result_df.reset_index(drop=True)
    result_df['id'] = result_df.index

    # Merge overlapping candidate boxes using the original image pixels.
    duplicate_merger = DuplicateMerger()
    duplicate_merger.multiprocess = False
    duplicate_merger.compression_factor = 1
    image_name = result_df['image_name'].iloc[0]
    if 'COLAB_GPU' in os.environ:
        pixel_data = read_image_bgr(os.path.join('dataset', image_name))
    else:
        pixel_data = read_image_bgr(os.path.join(root_dir(), image_name))

    filtered_data = duplicate_merger.filter_duplicate_candidates(
        result_df, pixel_data)
    return filtered_data
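A minimal usage sketch follows; the detection values and the image name are made up, and the image name must resolve to a real file for read_image_bgr to load.

import numpy as np

# Hypothetical detections: x1, y1, x2, y2, confidence, hard_score per row.
raw_detections = np.array([[10, 20, 110, 220, 0.91, 0.88],
                           [12, 22, 108, 218, 0.85, 0.80]])
merged = merge_detections('shelf_001.jpg', raw_detections)
print(merged[['x1', 'y1', 'x2', 'y2', 'confidence', 'hard_score']])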
Example #2
def load_image(self, image_index):
    """ Load an image at the image_index. """
    return read_image_bgr(self.image_path(image_index))
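For context, this method lives on a data-generator class; a minimal sketch of calling it, assuming the stock keras-retinanet CSVGenerator and hypothetical annotation files (the project may use its own Generator subclass):

from keras_retinanet.preprocessing.csv_generator import CSVGenerator

# 'annotations.csv' and 'class_map.csv' are placeholder file names.
generator = CSVGenerator('annotations.csv', 'class_map.csv', base_dir='images')
bgr = generator.load_image(0)   # numpy array in BGR channel order
print(bgr.shape)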
Example #3
import os
import shutil
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np

# models, read_image_bgr, preprocess_image, resize_image, draw_box,
# draw_caption and EmMerger are assumed to be imported from the project's
# keras-retinanet code; the exact import paths depend on the repo layout.


def infer(weights, image_dir, labels_to_name, output_file, threshold,
          hard_score_rate):
    # Load the trained RetinaNet model in inference mode; NMS is disabled
    # because duplicates are merged by EmMerger below.
    model = models.load_model(weights,
                              backbone_name='resnet50',
                              convert=True,
                              nms=False)
    labels_to_names = labels_to_name
    csv_data_lst = [
        ['image_id', 'x1', 'y1', 'x2', 'y2', 'confidence', 'hard_score']
    ]
    max_detections = 9999
    images = os.listdir(image_dir)

    # Recreate the output images directory from scratch.
    images_out_dir = os.path.join(str(output_file), 'images')
    if os.path.exists(images_out_dir):
        shutil.rmtree(images_out_dir)
    os.makedirs(images_out_dir)

    # Run inference
    t0 = time.time()
    for img in images:
        image_path = os.path.join(image_dir, img)
        t = time.time()
        image = read_image_bgr(image_path)

        # Keep an RGB copy of the original image for visualisation.
        draw = image.copy()
        draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

        # Normalise and resize for the backbone; keep the scale factor so
        # boxes can be mapped back to the original resolution.
        image = preprocess_image(image)
        image, scale = resize_image(image)
        # Run the network; it returns boxes plus two score heads
        # (hard objectness scores and soft scores).
        boxes, hard_scores, labels, soft_scores = model.predict_on_batch(
            np.expand_dims(image, axis=0))
        soft_scores = np.squeeze(soft_scores, axis=-1)
        # Blend the two score heads according to hard_score_rate.
        soft_scores = (hard_score_rate * hard_scores +
                       (1 - hard_score_rate) * soft_scores)
        # Map boxes back to the original image resolution.
        boxes /= scale

        # Keep detections whose hard score clears the threshold, sorted by
        # blended confidence.
        indices = np.where(hard_scores[0, :] > threshold)[0]
        scores = soft_scores[0][indices]
        hard_scores = hard_scores[0][indices]
        scores_sort = np.argsort(-scores)[:max_detections]
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_hard_scores = hard_scores[scores_sort]
        image_labels = labels[0, indices[scores_sort]]

        image_detections = np.concatenate([
            image_boxes,
            np.expand_dims(image_scores, axis=1),
            np.expand_dims(image_labels, axis=1)
        ], axis=1)
        # Boxes + blended score + hard score + label, one row per detection,
        # in the layout expected by the duplicate merger.
        results = np.concatenate([
            image_boxes,
            np.expand_dims(image_scores, axis=1),
            np.expand_dims(image_hard_scores, axis=1),
            np.expand_dims(image_labels, axis=1)
        ], axis=1)
        filtered_data = EmMerger.merge_detections(image_path, results)
        filtered_boxes = []
        filtered_scores = []
        filtered_labels = []

        for ind, detection in filtered_data.iterrows():
            box = np.asarray([
                detection['x1'], detection['y1'], detection['x2'],
                detection['y2']
            ])
            filtered_boxes.append(box)
            filtered_scores.append(detection['confidence'])
            filtered_labels.append('{0:.2f}'.format(detection['hard_score']))
            row = [
                image_path, detection['x1'], detection['y1'], detection['x2'],
                detection['y2'], detection['confidence'],
                detection['hard_score']
            ]
            csv_data_lst.append(row)

        for box, score, label in zip(filtered_boxes, filtered_scores,
                                     filtered_labels):
            # scores are sorted so we can break
            if score < threshold:
                break

            # Single hard-coded colour; with multiple object classes, use one
            # colour per class instead.
            color = [31, 0, 255]

            b = box.astype(int)
            draw_box(draw, b, color=color)

            # Index 0 is hard-coded because this detector has a single class.
            caption = "{} {:.3f}".format(labels_to_names[0], score)
            draw_caption(draw, b, caption)

        # Save the annotated image, one PNG per input image, then close the
        # figure so memory does not grow across the loop.
        plt.figure(figsize=(20, 20))
        plt.axis('off')
        plt.imshow(draw)
        plt.savefig(
            os.path.join(images_out_dir, str(img.split('.')[0]) + '.png'))
        plt.close()
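A minimal sketch of invoking this function; the weights path, image directory, output directory, label map and score settings below are made-up example values:

# Hypothetical example values; adjust to your own weights, images and output dir.
infer(weights='snapshots/iou_resnet50_csv_06.h5',
      image_dir='test_images',
      labels_to_name={0: 'object'},
      output_file='results',
      threshold=0.3,
      hard_score_rate=0.5)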