Code Example #1
File: debug.py  Project: neuron888/keras-maskrcnn
# NOTE: the module paths below are the presumed locations of these helpers in
# keras-retinanet / keras-maskrcnn; adjust them if your project layout differs.
import cv2
from keras_retinanet.utils.anchors import anchors_for_shape, compute_gt_annotations
from keras_retinanet.utils.visualization import draw_annotations, draw_boxes, draw_caption
from keras_maskrcnn.utils.visualization import draw_mask


def run(generator, args, anchor_params):
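    """Display each image in ``generator`` with its ground-truth data drawn on top.

    ``args`` is an argparse namespace whose boolean flags (``random_transform``,
    ``resize``, ``anchors``, ``annotations``, ``masks``) control which overlays
    are applied. Returns False if the user presses 'q', True after the last image.
    """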
    # display images, one at a time
    for i in range(generator.size()):
        # load the data
        image       = generator.load_image(i)
        annotations = generator.load_annotations(i)

        # apply random transformations
        if args.random_transform:
            image, annotations = generator.random_transform_group_entry(image, annotations)

        # resize the image and annotations
        if args.resize:
            image, image_scale  = generator.resize_image(image)
            annotations['bboxes'] *= image_scale
            for m in range(len(annotations['masks'])):
                annotations['masks'][m], _ = generator.resize_image(annotations['masks'][m])

        anchors = anchors_for_shape(image.shape, anchor_params=anchor_params)
        positive_indices, _, max_indices = compute_gt_annotations(anchors, annotations['bboxes'])

        # draw anchors on the image
        if args.anchors:
            anchors = generator.generate_anchors(image.shape)
            positive_indices, _, max_indices = compute_gt_annotations(anchors, annotations['bboxes'])
            draw_boxes(image, anchors[positive_indices], (255, 255, 0), thickness=1)

        # draw annotations on the image
        if args.annotations:
            # draw annotations in red
            draw_annotations(image, annotations, color=(0, 0, 255), label_to_name=generator.label_to_name)

            # draw regressed anchors in green to override most red annotations
            # result is that annotations without anchors are red, with anchors are green
            draw_boxes(image, annotations['bboxes'][max_indices[positive_indices], :], (0, 255, 0))

        # Draw masks over the image with random colours
        if args.masks:
            for m in range(len(annotations['masks'])):
                # crop the mask with the related bbox size, and then draw them
                box = annotations['bboxes'][m].astype(int)
                mask = annotations['masks'][m][box[1]:box[3], box[0]:box[2]]
                draw_mask(image, box, mask, annotations['labels'][m].astype(int))
                # add the label caption
                caption = '{}'.format(generator.label_to_name(annotations['labels'][m]))
                draw_caption(image, box, caption)

        cv2.imshow('Image', image)
        if cv2.waitKey() == ord('q'):
            return False
    return True
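For context, here is a minimal sketch of how run() might be driven. The CSVGenerator module path, its constructor arguments and the CSV file names are assumptions for illustration, not taken from the example above; only the flag names mirror what run() actually reads.

import argparse

from keras_maskrcnn.preprocessing.csv_generator import CSVGenerator  # assumed module path

# hypothetical generator built from placeholder CSV files
generator = CSVGenerator('annotations.csv', 'classes.csv')

# mirror the boolean flags run() reads from `args`
args = argparse.Namespace(
    random_transform=False,
    resize=True,
    anchors=True,
    annotations=True,
    masks=True,
)

# anchor_params=None falls back to the library's default anchor parameters
run(generator, args, anchor_params=None)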
Code Example #2
import os
import pickle
import sys

import keras
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tqdm import tqdm

# anchors_for_shape and compute_gt_annotations are presumed to come from
# keras-retinanet; parse_args, create_test_generator, models, _get_detections
# and _get_annotations are project-specific helpers assumed to be defined
# (or imported) elsewhere in this project.
from keras_retinanet.utils.anchors import anchors_for_shape, compute_gt_annotations


def main(args=None):
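    """Evaluate the test set and plot ground truth against filtered detections.

    Detections are read from a cached pickle when it exists; otherwise the model
    is loaded, converted for inference, run over the test generator, and the
    results are cached. Each example is then plotted twice (IR only and
    IR/WV/SLP) with ground-truth boxes in red and detections in green.
    """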
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # optionally choose specific GPU
    # if args.gpu:
    #     os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # create the generators
    test_generator = create_test_generator(args)

    if os.path.exists(
            '/app/Ant_mcs_detection/output/test_set_all_detections.pkl'):
        with open('/app/Ant_mcs_detection/output/test_set_all_detections.pkl',
                  'rb') as f:
            all_detections = pickle.load(f)
    else:
        with tf.device('/device:GPU:3'):
            config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False,
                                    device_count={
                                        'CPU': 1,
                                        'GPU': 1
                                    })
            config.gpu_options.allow_growth = True
            session = tf.Session(config=config)
            keras.backend.set_session(session)

        print('Creating model, this may take a second...')
        model = models.load_model(os.path.abspath(args.weights),
                                  backbone_name=args.backbone)
        model = models.convert_model(model)

        # print model summary
        print(model.summary())

        score_threshold = 0.05
        max_detections = 0
        save_path = None
        eval_batch_size = 16

        all_detections = _get_detections(test_generator,
                                         model,
                                         score_threshold=score_threshold,
                                         max_detections=max_detections,
                                         save_path=save_path,
                                         eval_batch_size=eval_batch_size)
        with open('/app/Ant_mcs_detection/output/test_set_all_detections.pkl',
                  'wb') as f:
            pickle.dump(all_detections, f)

    all_annotations = _get_annotations(
        test_generator, predicted_items_count=len(all_detections))

    proba_percentiles = np.linspace(10, 99, 90)
    iou_values_per_example = []
    for example_idx in tqdm(range(len(all_annotations)),
                            total=len(all_annotations)):
        image = np.copy(test_generator.load_image(example_idx))
        _, example_mask, _, _ = test_generator.data_manager.get_all_data(
            test_generator.image_names[example_idx])
        example_mask = np.copy(example_mask)

        image = np.copy(image)

        # invert the first two image channels
        image[:, :, 0] = 255 - image[:, :, 0]
        image[:, :, 1] = 255 - image[:, :, 1]
        # image = np.ma.array(image)
        # zero out pixels covered by example_mask (broadcast to 3 channels)
        image_mask = np.tile(1 - example_mask, (1, 1, 3))
        image = image * image_mask
        annotations = test_generator.load_annotations(example_idx)
        anchors = anchors_for_shape(image.shape, anchor_params=None)
        positive_indices, _, max_indices = compute_gt_annotations(
            anchors, annotations['bboxes'])

        curr_detections = all_detections[example_idx][0]
        # selected_detections = curr_detections[curr_detections[:, -1] >= args.proba_threshold, :]
        # bboxes = selected_detections[:, :4]
        # probas = selected_detections[:, -1]
        bboxes = curr_detections[:, :4]
        probas = curr_detections[:, -1]

        # region filter detections

        bboxes_filtered = np.copy(bboxes)
        scores_filtered = np.copy(probas)
        # proba_threshold = np.percentile(scores_filtered, args.shrinking_proba_perc_threshold)
        proba_threshold = args.proba_threshold
        filter_indices = np.where(scores_filtered >= proba_threshold)[0]
        bboxes_filtered = bboxes_filtered[filter_indices, :]
        scores_filtered = scores_filtered[filter_indices]

        #region IR_only
        f = plt.figure(figsize=(6, 6), dpi=300)
        plt.imshow(image[:, :, 0], cmap='gray', vmin=0, vmax=255)
        for i in range(annotations['bboxes'].shape[0]):
            label = annotations['labels'][i]
            box = annotations['bboxes'][i]
            x1, y1, x2, y2 = np.array(box).astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='red',
                     linewidth=2)

        for bbox, proba in zip(bboxes_filtered, scores_filtered):
            x1, y1, x2, y2 = bbox.astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='lightgreen',
                     linewidth=2,
                     alpha=proba / probas.max())
            plt.text(x1, y1, '%.3f' % proba, fontsize=10)
        plt.axis('off')
        plt.savefig(
            os.path.join('/app/Ant_mcs_detection/output/images_IRonly/',
                         'test_img_IR_withLabels_%05d.png' % example_idx))
        plt.close()
        #endregion IR_only

        # region IR_WV_SLP
        f = plt.figure(figsize=(6, 6), dpi=300)
        plt.imshow(image)
        for i in range(annotations['bboxes'].shape[0]):
            label = annotations['labels'][i]
            box = annotations['bboxes'][i]
            x1, y1, x2, y2 = np.array(box).astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='red',
                     linewidth=2)

        for bbox, proba in zip(bboxes_filtered, scores_filtered):
            x1, y1, x2, y2 = bbox.astype(int)
            plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1],
                     color='lightgreen',
                     linewidth=2,
                     alpha=proba / probas.max())
            plt.text(x1, y1, '%.3f' % proba, fontsize=10)
        plt.axis('off')
        plt.savefig(
            os.path.join(
                '/app/Ant_mcs_detection/output/images_IR_WV_SLP/',
                'test_img_IR_WV_SLP_withLabels_%05d.png' % example_idx))
        plt.close()