Example 1
def offline_evaluate(pred_func, output_file):
    df = get_test_dataflow(add_mask=False)
    all_results = eval_on_dataflow(
        df, lambda img: detect_one_image(img, pred_func))
    with open(output_file, 'w') as f:
        json.dump(all_results, f, cls=MyEncoder)
    ret = print_evaluation_scores(output_file)
    print(ret)
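Example 1 serializes the evaluation results with a custom MyEncoder that is not shown in the snippet. A minimal sketch, assuming its only job is to make NumPy scalars and arrays JSON-serializable for json.dump:

import json

import numpy as np


class MyEncoder(json.JSONEncoder):
    # Hypothetical encoder: converts NumPy types into plain Python objects
    # so that json.dump can handle detection results.
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(MyEncoder, self).default(obj)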
Example 2
def visualize(model_path, nr_visualize=50, output_dir='visualize'):
    df = get_test_dataflow()  # we don't visualize mask stuff
    df.reset_state()

    pred = OfflinePredictor(
        PredictConfig(model=Model(),
                      session_init=get_model_loader(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_rpn_proposals/boxes',
                          'generate_rpn_proposals/probs',
                          'fastrcnn_all_probs',
                          'final_boxes',
                          'final_probs',
                          'final_labels',
                          'final_masks',
                      ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df.get_data()),
                                        nr_visualize):
            img, _, _, gt_boxes, gt_labels, _ = dp

            rpn_boxes, rpn_scores, all_probs, \
                final_boxes, final_probs, final_labels, final_masks = pred(img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_probs[good_proposals_ind])

            # masks are skipped here (None placeholders); pass final_masks into zip to draw them
            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_probs, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, final_viz], 2, 2)

            # optional interactive display:
            # if os.environ.get('DISPLAY', None):
            #     tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
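DetectionResult is consumed above but not defined in the snippet. A minimal sketch of a compatible record type, assuming the field order implied by the zip(...) call (box, score, class id, mask):

from collections import namedtuple

# Hypothetical stand-in for the DetectionResult used above; the field order
# mirrors zip(final_boxes, final_probs, final_labels, masks).
DetectionResult = namedtuple('DetectionResult', ['box', 'score', 'class_id', 'mask'])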
Example 3
def offline_evaluate(pred_func, output_file):
    df = get_test_dataflow()
    all_results = eval_on_dataflow(
        df, lambda img: detect_one_image(img, pred_func))
    print(all_results)


def _setup_graph(self):
    # in the original script this is a method of an evaluation callback class
    self.pred = self.trainer.get_predictor(['image'],
                                           get_model_output_names())
    self.df = get_test_dataflow(add_mask=True)


if __name__ == '__main__':
    # ... argument parsing elided in this snippet ...
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.visualize or args.evaluate or args.predict:
        # autotune is too slow for inference
        os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'

        assert args.load
        print_config()

    if args.evaluate:
        pred = OfflinePredictor(
            PredictConfig(model=Model(),
                          session_init=get_model_loader(args.load),
                          input_names=['image'],
                          output_names=get_model_output_names()))
        df = get_test_dataflow(add_mask=True)
        df.reset_state()
        all_results, local_score = eval_on_dataflow(
            df, lambda img: detect_one_image(img, pred))
        print("F2 Score: ", local_score)

    elif args.predict:
        imgs = Detection.load_many(
            config.BASEDIR, config.TEST_DATASET,
            add_gt=False)  # to load the class names into caches
        # filter out zero-ship images
        imgs = [(img['image_data'], img['id']) for img in imgs]
        pred = OfflinePredictor(
            PredictConfig(model=Model(),
                          session_init=get_model_loader(args.load),
                          input_names=['image'],