Code Example #1
def from_retinanet(annotations_csv):
    """Use the keras-retinanet source to create detections and annotations."""
    # Load the prebuilt DeepForest release model and its config
    model = deepforest.deepforest()
    model.use_release()
    ### Keras retinanet
    # Format args for CSV generator
    classes_file = utilities.create_classes(annotations_csv)
    arg_list = utilities.format_args(annotations_csv, classes_file,
                                     model.config)
    args = parse_args(arg_list)

    # create generator
    validation_generator = csv_generator.CSVGenerator(
        args.annotations,
        args.classes,
        image_min_side=args.image_min_side,
        image_max_side=args.image_max_side,
        config=args.config,
        shuffle_groups=False,
    )

    all_detections = _get_detections(validation_generator,
                                     model.prediction_model,
                                     score_threshold=args.score_threshold,
                                     max_detections=100)
    all_annotations = _get_annotations(validation_generator)

    return all_detections, all_annotations, validation_generator
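The helper returns the nested structures produced by the keras-retinanet evaluation utilities. A minimal usage sketch, assuming the usual keras-retinanet layout (all_detections[image_idx][label] is an (N, 5) array of [x1, y1, x2, y2, score] and all_annotations[image_idx][label] is an (M, 4) array of ground-truth boxes); the CSV path is hypothetical:

# Minimal usage sketch under the layout assumptions above.
all_detections, all_annotations, generator = from_retinanet("annotations.csv")  # hypothetical path

for image_idx in range(generator.size()):
    for label in range(generator.num_classes()):
        detections = all_detections[image_idx][label]
        ground_truth = all_annotations[image_idx][label]
        print("image %d, class %d: %d detections vs %d annotations"
              % (image_idx, label, detections.shape[0], ground_truth.shape[0]))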
Code Example #2
def f1_evaluation(generator, model, iou_threshold=0.5, score_threshold=0.05,
                  max_detections=100, save_path=None):
    all_detections = _get_detections(generator, model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections,
                                     save_path=save_path)
    all_annotations = _get_annotations(generator)
    best_thresh = {}
    best_f1 = {}
    # process detections and annotations
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        true_positives, false_positives, scores, num_annotations = compute_measures(
                generator, label, iou_threshold, all_detections, all_annotations)
        if num_annotations == 0:
            continue

        f1_points, scores = compute_f1(true_positives, false_positives, scores, num_annotations)
        best_f1[label] = np.max(f1_points) if f1_points.size else 0
        best_thresh[label] = scores[np.argmax(f1_points)] if f1_points.size else 0

    return best_f1, best_thresh
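The helpers compute_measures and compute_f1 are project-specific and not shown above. A plausible sketch of compute_f1, assuming it receives per-detection TP/FP flags with their scores and returns the F1 value obtainable at each score cut-off:

import numpy as np

def compute_f1(true_positives, false_positives, scores, num_annotations):
    # Hypothetical helper: sort detections by descending score, accumulate
    # TP/FP counts, and evaluate F1 at every possible score threshold.
    order = np.argsort(-scores)
    tp = np.cumsum(true_positives[order])
    fp = np.cumsum(false_positives[order])
    recall = tp / num_annotations
    precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    f1 = 2 * precision * recall / np.maximum(precision + recall,
                                             np.finfo(np.float64).eps)
    return f1, scores[order]

With this shape, best_thresh[label] = scores[np.argmax(f1_points)] in the example picks the score threshold at which the per-class F1 peaks.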
Code Example #3
File: predict.py  Project: kayoyin/digit-detector
def infer(
    generator,
    model,
    max_detections=100,
):
    all_detections = _get_detections(generator, model, max_detections=max_detections)
    submission = []

    for i in range(generator.size()):
        labels = []
        boxes = []
        scores = []
        for label in range(generator.num_classes()):
            detections = all_detections[i][label]
            for d in detections:
                labels.append(label)
                boxes.append(d[:4])
                scores.append(d[4])

        submission.append(to_dict(labels, boxes, scores))

    submission = order_submission(submission)
    with open("submission.json", 'w') as file:
        json.dump(submission, file)
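to_dict and order_submission are project-specific helpers that are not shown. A hedged sketch of what to_dict might look like, packaging one image's detections into a JSON-serializable entry (the key names are illustrative guesses, not the project's actual schema):

def to_dict(labels, boxes, scores):
    # Hypothetical helper: convert NumPy values to plain Python types so
    # that json.dump can serialize the submission entry.
    return {
        "label": [int(label) for label in labels],
        "bbox": [[float(coord) for coord in box] for box in boxes],
        "score": [float(score) for score in scores],
    }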
Code Example #4
def evaluate_dual_memory_model(generator,
                               models,
                               iou_threshold=0.5,
                               score_threshold=0.05,
                               max_detections=100,
                               save_path=None):
    detections_per_model = []
    all_inferences = []

    for model in models:
        current_detections, current_inferences = _get_detections(
            generator,
            model,
            score_threshold=score_threshold,
            max_detections=max_detections,
            save_path=save_path)
        detections_per_model.append(current_detections)
        all_inferences.append(current_inferences)
    average_precisions = {}
    all_annotations = _get_annotations(generator)

    # all_detections = detections_per_model[0]

    # process detections and annotations
    final_detections = {}
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = [
                all_detections[i][label]
                for all_detections in detections_per_model
            ]
            detections = combine_all_detections(detections, iou_threshold)
            key = f"label={label}, instance_index={i}"
            final_detections[key] = [[d.tolist() for d in detection]
                                     for detection in detections]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # Sort by score:
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # Compute false positives and true positives:
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # Compute recall and precision:
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        # Compute average precision:
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

    # Inference time:
    inference_time = np.sum(all_inferences) / generator.size()

    return average_precisions, inference_time, detections_per_model, final_detections
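Both this example and the next rely on compute_overlap, the Cython IoU routine shipped with keras-retinanet. A pure-NumPy stand-in that illustrates its behavior (the real implementation uses a pixel-inclusive +1 convention when computing areas, which is omitted here for clarity):

import numpy as np

def compute_overlap_np(boxes, query_boxes):
    # Return an (N, K) matrix of IoU values between two sets of
    # [x1, y1, x2, y2] boxes, mirroring keras-retinanet's compute_overlap.
    overlaps = np.zeros((boxes.shape[0], query_boxes.shape[0]), dtype=np.float64)
    for k in range(query_boxes.shape[0]):
        q = query_boxes[k]
        q_area = (q[2] - q[0]) * (q[3] - q[1])
        for n in range(boxes.shape[0]):
            b = boxes[n]
            iw = min(b[2], q[2]) - max(b[0], q[0])
            ih = min(b[3], q[3]) - max(b[1], q[1])
            if iw > 0 and ih > 0:
                b_area = (b[2] - b[0]) * (b[3] - b[1])
                overlaps[n, k] = iw * ih / (b_area + q_area - iw * ih)
    return overlaps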
Code Example #5
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # optionally choose specific GPU
    # if args.gpu:
    #     os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # create the generators
    test_generator = create_test_generator(args)

    if os.path.exists(
            '/app/Ant_mcs_detection/output/test_set_all_detections.pkl'):
        with open('/app/Ant_mcs_detection/output/test_set_all_detections.pkl',
                  'rb') as f:
            all_detections = pickle.load(f)
    else:
        with tf.device('/device:GPU:3'):
            config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False,
                                    device_count={
                                        'CPU': 1,
                                        'GPU': 1
                                    })
            config.gpu_options.allow_growth = True
            session = tf.Session(config=config)
            keras.backend.set_session(session)

        print('Creating model, this may take a second...')
        model = models.load_model(os.path.abspath(args.weights),
                                  backbone_name=args.backbone)
        model = models.convert_model(model)

        # print model summary
        print(model.summary())

        score_threshold = 0.05
        max_detections = 0
        save_path = None
        eval_batch_size = 16

        all_detections = _get_detections(test_generator,
                                         model,
                                         score_threshold=score_threshold,
                                         max_detections=max_detections,
                                         save_path=save_path,
                                         eval_batch_size=eval_batch_size)
        with open('/app/Ant_mcs_detection/output/test_set_all_detections.pkl',
                  'wb') as f:
            pickle.dump(all_detections, f)

    all_annotations = _get_annotations(
        test_generator, predicted_items_count=len(all_detections))

    proba_percentiles = np.linspace(10, 99, 90)
    iou_values_per_example = []
    for example_idx in tqdm(range(len(all_annotations)),
                            total=len(all_annotations)):
        image = np.copy(test_generator.load_image(example_idx))
        _, example_mask, _, _ = test_generator.data_manager.get_all_data(
            test_generator.image_names[example_idx])
        example_mask = np.copy(example_mask)

        image = np.copy(image)

        image[:, :, 0] = 255 - image[:, :, 0]
        image[:, :, 1] = 255 - image[:, :, 1]
        # image = np.ma.array(image)
        image_mask = np.tile(1 - example_mask, (1, 1, 3))
        image = image * image_mask
        annotations = test_generator.load_annotations(example_idx)
        anchors = anchors_for_shape(image.shape, anchor_params=None)
        positive_indices, _, max_indices = compute_gt_annotations(
            anchors, annotations['bboxes'])

        curr_detections = all_detections[example_idx][0]
        # selected_detections = curr_detections[curr_detections[:, -1] >= args.proba_threshold, :]
        # bboxes = selected_detections[:, :4]
        # probas = selected_detections[:, -1]
        bboxes = curr_detections[:, :4]
        probas = curr_detections[:, -1]

        # region filter detections

        bboxes_filtered = np.copy(bboxes)
        scores_filtered = np.copy(probas)
        # proba_threshold = np.percentile(scores_filtered, args.shrinking_proba_perc_threshold)
        proba_threshold = args.proba_threshold
        filter_indices = np.where(scores_filtered >= proba_threshold)[0]
        bboxes_filtered = bboxes_filtered[filter_indices, :]
        scores_filtered = scores_filtered[filter_indices]

        #region IR_only
        f = plt.figure(figsize=(6, 6), dpi=300)
        plt.imshow(image[:, :, 0], cmap='gray', vmin=0, vmax=255)
        for i in range(annotations['bboxes'].shape[0]):
            label = annotations['labels'][i]
            box = annotations['bboxes'][i]
            x1, y1, x2, y2 = np.array(box).astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='red',
                     linewidth=2)

        for bbox, proba in zip(bboxes_filtered, scores_filtered):
            x1, y1, x2, y2 = bbox.astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='lightgreen',
                     linewidth=2,
                     alpha=proba / probas.max())
            plt.text(x1, y1, '%.3f' % proba, fontsize=10)
        plt.axis('off')
        plt.savefig(
            os.path.join('/app/Ant_mcs_detection/output/images_IRonly/',
                         'test_img_IR_withLabels_%05d.png' % example_idx))
        plt.close()
        #endregion IR_only

        # region IR_WV_SLP
        f = plt.figure(figsize=(6, 6), dpi=300)
        plt.imshow(image)
        for i in range(annotations['bboxes'].shape[0]):
            label = annotations['labels'][i]
            box = annotations['bboxes'][i]
            x1, y1, x2, y2 = np.array(box).astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='red',
                     linewidth=2)

        for bbox, proba in zip(bboxes_filtered, scores_filtered):
            x1, y1, x2, y2 = bbox.astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='lightgreen',
                     linewidth=2,
                     alpha=proba / probas.max())
            plt.text(x1, y1, '%.3f' % proba, fontsize=10)
        plt.axis('off')
        plt.savefig(
            os.path.join(
                '/app/Ant_mcs_detection/output/images_IR_WV_SLP/',
                'test_img_IR_WV_SLP_withLabels_%05d.png' % example_idx))
        plt.close()
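The two plotting regions above (IR_only and IR_WV_SLP) repeat the same drawing loop. A refactoring sketch, not part of the original project, that factors the shared logic into one helper:

import numpy as np
import matplotlib.pyplot as plt

def plot_boxes_with_scores(annotations, bboxes, scores, score_norm):
    # Draw ground-truth boxes in red and filtered detections in green,
    # with the detection probability as text and as line transparency.
    for box in annotations['bboxes']:
        x1, y1, x2, y2 = np.array(box).astype(int)
        plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                 color='red', linewidth=2)
    for bbox, proba in zip(bboxes, scores):
        x1, y1, x2, y2 = bbox.astype(int)
        plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                 color='lightgreen', linewidth=2, alpha=proba / score_norm)
        plt.text(x1, y1, '%.3f' % proba, fontsize=10)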