Example 1
def predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred_func)
    final = draw_final_outputs(img, results)  # image annotated with boxes, labels and scores
    viz = np.concatenate((img, final), axis=1)
    tpviz.interactive_imshow(viz)
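
Example 1 takes pred_func as given. A minimal sketch of how such a predictor could be built with tensorpack's OfflinePredictor, assuming the FasterRCNN example's ResNetC4Model is importable from its train.py (module layout varies between versions) and using placeholder checkpoint/image paths:

# OfflinePredictor / PredictConfig / get_model_loader are tensorpack APIs;
# ResNetC4Model is assumed to come from the FasterRCNN example's train.py.
from tensorpack import OfflinePredictor, PredictConfig, get_model_loader
from train import ResNetC4Model

def build_pred_func(checkpoint_path):
    # Single-image offline predictor: feed 'image', fetch the final detections
    # (the same output names that Example 4 below requests).
    return OfflinePredictor(PredictConfig(
        model=ResNetC4Model(),
        session_init=get_model_loader(checkpoint_path),
        input_names=['image'],
        output_names=['output/boxes', 'output/scores', 'output/labels']))

pred_func = build_pred_func('/path/to/checkpoint')  # placeholder path
predict(pred_func, 'demo.jpg')                      # placeholder image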
Example 2
    def process_image(detector, img_path):
        # Detect faces
        image_bgr = cv2.imread(img_path)
        face_results = detector.detect(image_bgr, rgb=False)

        # Select the best faces to filter out false positives, such as faces printed on clothing
        best_faces = pick_best_faces(face_results, args.max_num_faces)

        # Visualize detected faces for quick verification
        nonlocal n_visualize
        if n_visualize > 0:
            image_bgr = draw_final_outputs(
                image_bgr, best_faces, show_ids=detector.get_class_ids())
            cv2.imshow('face detection', image_bgr)
            cv2.waitKey(0)
            n_visualize -= 1

        return best_faces
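
Example 2 declares nonlocal n_visualize, so process_image must be nested inside an enclosing function that owns that counter (args likewise comes from the enclosing scope). A self-contained sketch of that closure pattern, with hypothetical names (make_processor, max_popups) and a simplified body:

import cv2
from viz import draw_final_outputs  # assumption: the example's viz module is on the path

def make_processor(detector, max_popups=3):
    n_visualize = max_popups  # pop-up budget shared with the nested function

    def process_image(img_path):
        nonlocal n_visualize
        image_bgr = cv2.imread(img_path)
        faces = detector.detect(image_bgr, rgb=False)
        if n_visualize > 0:
            cv2.imshow('face detection', draw_final_outputs(image_bgr, faces))
            cv2.waitKey(0)
            n_visualize -= 1
        return faces

    return process_image

# usage: process = make_processor(my_face_detector); all_faces = [process(p) for p in image_paths]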
Example 3
    parser.add_argument(
        '--config',
        default='',
        type=str,
        help='Config overrides for the object detection model, as KEY=VALUE pairs',
        nargs='+'
    )
    args = parser.parse_args()
    if args.config:
        cfg.update_args(args.config)

    obj_detector = TensorPackDetector('/root/datasets/figmarcnn/checkpoint')
    img = cv2.imread('/root/datasets/img-folder/a.png', cv2.IMREAD_COLOR)

    results = obj_detector.detect(img, rgb=False)
    final = draw_final_outputs(img, results)  # image annotated with boxes, labels and scores
    viz = np.concatenate((img, final), axis=1)
    tpviz.interactive_imshow(viz)



'''
--image
/root/datasets/myimage/8.jpeg
--cam
0
--obj_model
two-stage
--obj_ckpt
/root/datasets/figmarcnn/checkpoint
--obj_config
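
In the tensorpack FasterRCNN example, cfg.update_args consumes a list of KEY=VALUE strings, which is why Example 3 declares --config with nargs='+'. A hedged sketch of the equivalent programmatic override (the import path and the chosen keys are illustrative and version-dependent):

from config import config as cfg  # assumption: the example's config module is on the path

# Same effect as passing `--config MODE_FPN=True TEST.RESULT_SCORE_THRESH=0.5` on the command line.
cfg.update_args(['MODE_FPN=True', 'TEST.RESULT_SCORE_THRESH=0.5'])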
Example 4
def visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
    """
    df = get_train_dataflow()  # we don't visualize mask stuff
    df.reset_state()

    pred = OfflinePredictor(
        PredictConfig(model=model,
                      session_init=get_model_loader(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_{}_proposals/boxes'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'generate_{}_proposals/scores'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'fastrcnn_all_scores',
                          'output/boxes',
                          'output/scores',
                          'output/labels',
                      ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img = dp['image']
            if cfg.MODE_MASK:
                gt_boxes, gt_labels, gt_masks = dp['gt_boxes'], dp['gt_labels'], dp['gt_masks']
            else:
                gt_boxes, gt_labels = dp['gt_boxes'], dp['gt_labels']

            rpn_boxes, rpn_scores, all_scores, \
            final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_scores[good_proposals_ind])

            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_scores, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
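
A hedged sketch of how visualize above might be driven, assuming the FasterRCNN example's finalize_configs and a ResNetC4Model class are importable (their module layout differs between versions) and using a placeholder checkpoint path:

if __name__ == '__main__':
    from config import finalize_configs   # assumption: the example's config module
    from train import ResNetC4Model       # assumption: model class defined in train.py

    finalize_configs(is_training=False)   # derive and freeze config values before building the graph
    visualize(ResNetC4Model(), '/path/to/checkpoint', nr_visualize=20)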