Example #1
def main():
    parser = argparse.ArgumentParser(
        description='Classification model training')
    # Note: argparse's type=bool converts any non-empty string (even
    # 'False') to True, so this flag cannot actually be switched off
    # from the command line.
    parser.add_argument('--test_net',
                        type=bool,
                        default=True,
                        help='Test trained model on test data')
    parser.add_argument('--node_id', type=int, default=0, help='Node id')
    parser.add_argument('--config_file',
                        type=str,
                        default=None,
                        required=True,
                        help='Config file for params')
    parser.add_argument('opts',
                        help='see config.py for all options',
                        default=None,
                        nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    if args.config_file is not None:
        cfg_from_file(args.config_file)
    if args.opts is not None:
        cfg_from_list(args.opts)

    assert_and_infer_cfg()
    print_cfg()

    train(args)
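
All of the main() entry points in these examples share the same CLI convention: a required --config_file plus trailing KEY VALUE pairs captured with argparse.REMAINDER and forwarded to cfg_from_list. Below is a minimal, self-contained sketch of that pattern; the parse_args name and the printed output are illustrative, not part of the repository code.

import argparse
import sys


def parse_args():
    # A required config file plus free-form KEY VALUE overrides, as above.
    parser = argparse.ArgumentParser(description='Config-driven entry point')
    parser.add_argument('--config_file', type=str, required=True,
                        help='Config file for params')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='KEY VALUE pairs that override config entries')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    print(args.config_file, args.opts)

Invoked as, say, `python tool.py --config_file cfg.yaml TRAIN.BATCH_SIZE 32`, args.opts would come back as ['TRAIN.BATCH_SIZE', '32'] and be handed to the config-override helper.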
Example #2
def main():
    c2_utils.import_detectron_ops()
    parser = argparse.ArgumentParser(
        description='Classification model testing')
    parser.add_argument('--config_file',
                        type=str,
                        default=None,
                        required=True,
                        help='Config file for params')
    parser.add_argument('opts',
                        help='see config.py for all options',
                        default=None,
                        nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    if args.config_file is not None:
        cfg_from_file(args.config_file)
    if args.opts is not None:
        cfg_from_list(args.opts)

    assert_and_infer_cfg()
    print_cfg()

    test(args)
Example #3
def test_net():
    np.random.seed(cfg.RNG_SEED)

    cfg.TEST.DATA_TYPE = 'test'
    if cfg.TEST.TEST_FULLY_CONV is True:
        cfg.TRAIN.CROP_SIZE = cfg.TRAIN.JITTER_SCALES[0]
        cfg.TEST.USE_MULTI_CROP = 1
    elif cfg.TEST.TEST_FULLY_CONV_FLIP is True:
        cfg.TRAIN.CROP_SIZE = cfg.TRAIN.JITTER_SCALES[0]
        cfg.TEST.USE_MULTI_CROP = 2
    else:
        cfg.TRAIN.CROP_SIZE = 224

    # ------------------------------------------------------------------------
    logger.info('Setting test crop_size to: {}'.format(cfg.TRAIN.CROP_SIZE))

    print_cfg()
    # ------------------------------------------------------------------------

    results = []

    # save temporary file
    pkl_path = os.path.join(cfg.CHECKPOINT.DIR, "results_probs.pkl")
    assert os.path.exists(pkl_path)
    with open(pkl_path, 'rb') as fin:
        results = pickle.load(fin)

    # evaluate
    if cfg.FILENAME_GT is not None:
        final_res = evaluate_result(results)
        for metric in final_res.keys():
            if metric != 'per_class_ap':  # Too many things in this one
                print('{}: {}'.format(metric, final_res[metric]))
        pkl_out_path = os.path.join(cfg.CHECKPOINT.DIR, 'results_scores.pkl')
        print('Storing scores in {}'.format(pkl_out_path))
        with open(pkl_out_path, 'wb') as fout:
            pickle.dump(final_res, fout)
        logger.info('=========================================')
        logger.info('=============Random Baseline=============')
        logger.info('=========================================')
        N_RUNS = 10
        rand_res = []
        for _ in range(N_RUNS):
            rand_res.append(
                evaluate_result([(el[0], np.random.random(len(el[1]), ))
                                 for el in results]))
        logger.info(
            'Empirical random baseline, averaged over {} runs'.format(N_RUNS))
        for metric in rand_res[0].keys():
            if metric != 'per_class_ap':  # Too many things in this one
                print('{}: {}'.format(metric,
                                      np.mean([el[metric]
                                               for el in rand_res])))
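
The random-baseline block above replaces each probability vector with uniform random scores of the same length and averages the metrics over N_RUNS repetitions. Here is a stand-alone sketch of that idea; the evaluate function and the (label, probs) data layout are assumptions for illustration, not the repository's evaluate_result.

import numpy as np


def evaluate(results):
    # results: list of (label, probability-vector) pairs; report top-1 accuracy.
    correct = [int(np.argmax(probs) == label) for label, probs in results]
    return {'accuracy': float(np.mean(correct))}


rng = np.random.RandomState(0)
results = [(rng.randint(5), rng.dirichlet(np.ones(5))) for _ in range(100)]

N_RUNS = 10
rand_res = [evaluate([(label, rng.random_sample(len(probs)))
                      for label, probs in results])
            for _ in range(N_RUNS)]
print('model accuracy:', evaluate(results)['accuracy'])
print('random baseline:', np.mean([r['accuracy'] for r in rand_res]))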
Example #4
def main():
    parser = argparse.ArgumentParser(description='Visualize predictions')
    parser.add_argument('--config_file', type=str, default=None, required=True,
                        help='Config file for params')
    parser.add_argument('opts', help='see config.py for all options',
                        default=None, nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    if args.config_file is not None:
        cfg_from_file(args.config_file)
    if args.opts is not None:
        cfg_from_list(args.opts)

    assert_and_infer_cfg()
    print_cfg()
    visualize_location(args)
Example #5
def test_net():
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    np.random.seed(cfg.RNG_SEED)

    cfg.TEST.DATA_TYPE = 'test'
    if cfg.TEST.TEST_FULLY_CONV is True:
        cfg.TRAIN.CROP_SIZE = cfg.TRAIN.JITTER_SCALES[0]
        cfg.TEST.USE_MULTI_CROP = 1
    elif cfg.TEST.TEST_FULLY_CONV_FLIP is True:
        cfg.TRAIN.CROP_SIZE = cfg.TRAIN.JITTER_SCALES[0]
        cfg.TEST.USE_MULTI_CROP = 2
    else:
        cfg.TRAIN.CROP_SIZE = 224

    # ------------------------------------------------------------------------
    logger.info('Setting test crop_size to: {}'.format(cfg.TRAIN.CROP_SIZE))

    print_cfg()
    # ------------------------------------------------------------------------

    results = []
    workspace.ResetWorkspace()  # for memory
    logger.info("Done ResetWorkspace...")

    results = test_net_one_section()

    # evaluate
    if cfg.FILENAME_GT is not None:
        evaluate_result(results)

    # save temporary file
    pkl_path = os.path.join(cfg.CHECKPOINT.DIR, "results_probs.pkl")

    with open(pkl_path, 'wb') as f:
        pickle.dump(results, f)
    logger.info('Temporary file saved to: {}'.format(pkl_path))
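
Examples #3 and #5 are the two halves of a save/evaluate split: #5 runs the network and dumps the raw probabilities to results_probs.pkl, while #3 later reloads that file for scoring. A minimal sketch of that handshake follows; the path and data are illustrative, and binary mode ('wb'/'rb') keeps the pickle portable across Python 2 and 3.

import os
import pickle
import tempfile

results = [('video_0001', [0.1, 0.7, 0.2])]  # illustrative (id, probs) pairs

pkl_path = os.path.join(tempfile.gettempdir(), 'results_probs.pkl')
with open(pkl_path, 'wb') as f:    # write stage, as in Example #5
    pickle.dump(results, f)

with open(pkl_path, 'rb') as fin:  # read stage, as in Example #3
    results = pickle.load(fin)
assert results == [('video_0001', [0.1, 0.7, 0.2])]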
Example #6
def test_one_crop(lfb=None, suffix='', shift=None):
    """Test one crop."""
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    np.random.seed(cfg.RNG_SEED)

    cfg.AVA.FULL_EVAL = True

    if lfb is None and cfg.LFB.ENABLED:
        print_cfg()
        lfb = get_lfb(cfg.LFB.MODEL_PARAMS_FILE, is_train=False)

    print_cfg()

    workspace.ResetWorkspace()
    logger.info("Done ResetWorkspace...")

    timer = Timer()

    logger.warning('Testing started...')  # for monitoring cluster jobs

    if shift is None:
        shift = cfg.TEST.CROP_SHIFT
    test_model = model_builder_video.ModelBuilder(train=False,
                                                  use_cudnn=True,
                                                  cudnn_exhaustive_search=True,
                                                  split=cfg.TEST.DATA_TYPE)

    test_model.build_model(lfb=lfb, suffix=suffix, shift=shift)

    if cfg.PROF_DAG:
        test_model.net.Proto().type = 'prof_dag'
    else:
        test_model.net.Proto().type = 'dag'

    workspace.RunNetOnce(test_model.param_init_net)
    workspace.CreateNet(test_model.net)

    misc.save_net_proto(test_model.net)
    misc.save_net_proto(test_model.param_init_net)

    total_test_net_iters = misc.get_total_test_iters(test_model)

    test_model.start_data_loader()
    test_meter = metrics.MetricsCalculator(
        model=test_model,
        split=cfg.TEST.DATA_TYPE,
        video_idx_to_name=test_model.input_db._video_idx_to_name,
        total_num_boxes=(test_model.input_db._num_boxes_used
                         if cfg.DATASET in ['ava', 'avabox'] else None))

    if cfg.TEST.PARAMS_FILE:
        checkpoints.load_model_from_params_file_for_test(
            test_model, cfg.TEST.PARAMS_FILE)
    else:
        raise Exception('No params files specified for testing model.')

    begin_time = time.time()

    for test_iter in range(total_test_net_iters):
        timer.tic()
        workspace.RunNet(test_model.net.Proto().name)
        timer.toc()

        if test_iter == 0:
            misc.print_net(test_model)
            os.system('nvidia-smi')
            misc.show_flops_params(test_model)

        test_meter.calculate_and_log_all_metrics_test(test_iter, timer,
                                                      total_test_net_iters,
                                                      suffix)

    logger.info('TTTTTTTIME: {}'.format(time.time() - begin_time))

    test_meter.finalize_metrics(name=get_test_name(shift))
    test_meter.log_final_metrics(test_iter, total_test_net_iters)
    test_model.shutdown_data_loader()
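
The test loop above wraps every RunNet call in a tic/toc Timer and prints the net structure, nvidia-smi output, and FLOP/parameter counts only on the first iteration. A self-contained sketch of that per-iteration timing pattern follows; this Timer is an assumption modeled on the Detectron-style utility, not the repository's class.

import time


class Timer(object):
    # Minimal tic/toc timer that tracks the running average per call.
    def __init__(self):
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.average_time = 0.0

    def tic(self):
        self.start_time = time.time()

    def toc(self):
        self.total_time += time.time() - self.start_time
        self.calls += 1
        self.average_time = self.total_time / self.calls


timer = Timer()
for test_iter in range(5):
    timer.tic()
    sum(range(10 ** 5))  # stand-in for workspace.RunNet(...)
    timer.toc()
    if test_iter == 0:
        print('one-time diagnostics (net proto, nvidia-smi, FLOPs) go here')
print('average iteration time: {:.6f}s'.format(timer.average_time))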
Example #7
def test_net(full_label_fname=None, store_vis=False):
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    np.random.seed(cfg.RNG_SEED)

    cfg.TEST.DATA_TYPE = 'test'
    if cfg.TEST.TEST_FULLY_CONV is True:
        cfg.TRAIN.CROP_SIZE = cfg.TRAIN.JITTER_SCALES[0]
        cfg.TEST.USE_MULTI_CROP = 1
    elif cfg.TEST.TEST_FULLY_CONV_FLIP is True:
        cfg.TRAIN.CROP_SIZE = cfg.TRAIN.JITTER_SCALES[0]
        cfg.TEST.USE_MULTI_CROP = 2
    else:
        cfg.TRAIN.CROP_SIZE = 224

    # ------------------------------------------------------------------------
    logger.info('Setting test crop_size to: {}'.format(cfg.TRAIN.CROP_SIZE))

    print_cfg()
    # ------------------------------------------------------------------------

    results = []
    workspace.ResetWorkspace()  # for memory
    logger.info("Done ResetWorkspace...")

    # save temporary file
    fname = 'results_probs.pkl'
    if full_label_fname is not None:
        fname = 'results_probs_{}.pkl'.format(full_label_fname)
        # Set the dataset size and GT path based on the full lbl fname
        if full_label_fname == 'test_fullLbl':
            cfg.FILENAME_GT = osp.join(osp.dirname(cfg.FILENAME_GT), 'val.txt')
        elif full_label_fname == 'train_fullLbl':
            cfg.FILENAME_GT = osp.join(osp.dirname(cfg.FILENAME_GT),
                                       'train.txt')
        else:
            raise NotImplementedError(
                'Unknown full label fname {}'.format(full_label_fname))
        cfg.TEST.DATASET_SIZE = count_lines(cfg.FILENAME_GT)
    pkl_path = os.path.join(cfg.CHECKPOINT.DIR, fname)

    if os.path.exists(pkl_path) and not cfg.TEST.FORCE_RECOMPUTE_RESULTS:
        logger.warning('READING PRE-COMPUTED RESULTS! Delete the {} file '
                       'or set TEST.FORCE_RECOMPUTE_RESULTS True '
                       'to recompute the test results'.format(pkl_path))
        with open(pkl_path, 'rb') as fin:
            results = pickle.load(fin)
    else:
        results = test_net_one_section(full_label_fname=full_label_fname,
                                       store_vis=store_vis)

    with open(pkl_path, 'wb') as f:
        pickle.dump(results, f)
    logger.info('Temporary file saved to: {}'.format(pkl_path))

    # evaluate
    if cfg.FILENAME_GT is not None:
        logger.info('Overall perf (full label: %s): %s', full_label_fname,
                    evaluate_result(results))
        logger.info('=========================================')
        logger.info('=============Random Baseline=============')
        logger.info('=========================================')
        N_RUNS = 10
        rand_res = []
        for _ in range(N_RUNS):
            rand_res.append(
                evaluate_result([(el[0], np.random.random(len(el[1]), ))
                                 for el in results]))
        logger.info(
            'Empirical random baseline, averaged over {} runs'.format(N_RUNS))
        for metric in rand_res[0].keys():
            logger.info('{}: {}'.format(
                metric, np.mean([el[metric] for el in rand_res])))
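
Compared with Example #5, Example #7 adds a cache check: if the results pickle already exists and TEST.FORCE_RECOMPUTE_RESULTS is unset, it reloads the stored probabilities instead of re-running the network, then rewrites the file and evaluates. Below is a minimal sketch of that compute-or-reuse pattern; the helper name, its arguments, and the demo path are illustrative.

import os
import pickle
import tempfile


def cached_results(pkl_path, compute_fn, force_recompute=False):
    # Reuse a previously stored result unless recomputation is forced.
    if os.path.exists(pkl_path) and not force_recompute:
        with open(pkl_path, 'rb') as fin:
            return pickle.load(fin)
    results = compute_fn()
    with open(pkl_path, 'wb') as fout:
        pickle.dump(results, fout)
    return results


if __name__ == '__main__':
    path = os.path.join(tempfile.gettempdir(), 'results_probs_demo.pkl')
    print(cached_results(path, lambda: [('video_0001', [0.3, 0.7])]))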