Code example #1
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    )
Code example #2
File: train_net.py  Project: donnyyou/Detectron
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    )
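For context, in Detectron's train_net.py this helper is invoked from the script's main() once training has produced a final checkpoint. Below is a minimal sketch of such a call site; the parse_args()/train_model() wiring is assumed here and is not part of the example above.

# Hypothetical call site, assuming an argparse `args` object and a
# train_model() that returns a dict mapping iterations to weight files
# (see Code example #4 below).
if __name__ == '__main__':
    args = parse_args()
    checkpoints = train_model()
    # Evaluate the final weights produced by training
    test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)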
Code example #3
def test_model(self, model_file):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file,
        check_expected_results=True,
    )
Code example #4
File: train.py  Project: akshaya-a/Detectron
def train_model():
    """Model training loop."""
    model, weights_file, start_iter, checkpoints, output_dir = create_model()
    if 'final' in checkpoints:
        # The final model was found in the output directory, so nothing to do
        return checkpoints

    setup_model_for_training(model, weights_file, output_dir)
    training_stats = TrainingStats(model)
    CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
    TEST_PERIOD = int(cfg.TRAIN.TEST_ITERS)

    for cur_iter in range(start_iter, cfg.SOLVER.MAX_ITER):
        if model.roi_data_loader.has_stopped():
            handle_critical_error(model, 'roi_data_loader failed')
        training_stats.IterTic()
        lr = model.UpdateWorkspaceLr(cur_iter,
                                     lr_policy.get_lr_at_iter(cur_iter))
        workspace.RunNet(model.net.Proto().name)
        if cur_iter == start_iter:
            nu.print_net(model)
        training_stats.IterToc()
        training_stats.UpdateIterStats()
        training_stats.LogIterStats(cur_iter, lr)

        if (cur_iter + 1) % CHECKPOINT_PERIOD == 0 and cur_iter > start_iter:
            print("================SAVING MODEL=================")
            checkpoints[cur_iter] = os.path.join(
                output_dir, 'model_iter{}.pkl'.format(cur_iter))
            nu.save_model_to_weights_file(checkpoints[cur_iter], model)
        if (cur_iter + 1) % TEST_PERIOD == 0 and cur_iter > start_iter:
            print("================RUN INFERENCE==================")
            checkpoints[cur_iter] = os.path.join(
                output_dir, 'model_iter{}.pkl'.format(cur_iter))
            nu.save_model_to_weights_file(checkpoints[cur_iter], model)
            run_inference(checkpoints[cur_iter],
                          multi_gpu_testing=False,
                          check_expected_results=True)

        if cur_iter == start_iter + training_stats.LOG_PERIOD:
            # Reset the iteration timer to remove outliers from the first few
            # SGD iterations
            training_stats.ResetIterTimer()

        if np.isnan(training_stats.iter_total_loss):
            handle_critical_error(model, 'Loss is NaN')

    # Save the final model
    print("=====================FINAL=======================")
    checkpoints['final'] = os.path.join(output_dir, 'model_final.pkl')
    nu.save_model_to_weights_file(checkpoints['final'], model)
    # Shutdown data loading threads
    model.roi_data_loader.shutdown()
    return checkpoints
Code example #5
def eval(args, cfg, logger, files):
    files = extract(files)
    files.sort(key=sortMethod)
    for f in files:
        iter_string = re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)
        checkpoint_iter = int(iter_string[0])
        resume_weights_file = f
        weights_file = os.path.join(cfg.TEST.WEIGHTS, resume_weights_file)
        logger.info(
            '========> Resuming from checkpoint {} at iter {}'.format(
                weights_file, checkpoint_iter)
        )

        run_inference(
            weights_file,
            ind_range=args.range,
            multi_gpu_testing=args.multi_gpu_testing,
            check_expected_results=True,
        )
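The extract() and sortMethod helpers used above are project-specific and not shown. A plausible sketch is given below, assuming they filter and order checkpoint files of the form model_iter<N>.pkl; both implementations are guesses, not code from the source project.

import re

# Hypothetical helpers assumed by the snippet above.
def extract(files):
    # Keep only checkpoint files named like model_iter<N>.pkl
    return [f for f in files if re.search(r'model_iter\d+\.pkl$', f)]

def sortMethod(f):
    # Sort key: the iteration number embedded in the file name
    return int(re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)[0])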
Code example #6
File: test_net.py  Project: yanxp/detectron-ssm
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    logger = setup_logging(__name__)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    while not os.path.exists(cfg.TEST.WEIGHTS) and args.wait:
        logger.info('Waiting for \'{}\' to exist...'.format(cfg.TEST.WEIGHTS))
        time.sleep(10)

    run_inference(
        cfg.TEST.WEIGHTS,
        ind_range=args.range,
        multi_gpu_testing=args.multi_gpu_testing,
        check_expected_results=True,
    )
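Only the tail of parse_args() appears at the top of this example. Judging from the attributes consumed afterwards (cfg_file, opts, wait, range, multi_gpu_testing), it is close to the standard Detectron test_net.py parser; the sketch below is an approximation and the flag names are assumptions.

import argparse
import sys

# Approximate reconstruction of parse_args(); flag names are assumed.
def parse_args():
    parser = argparse.ArgumentParser(description='Test a Detectron model')
    parser.add_argument('--cfg', dest='cfg_file', default=None, type=str)
    parser.add_argument('--wait', dest='wait', default=True, type=bool)
    parser.add_argument('--multi-gpu-testing', dest='multi_gpu_testing',
                        action='store_true')
    parser.add_argument('--range', dest='range', default=None, type=int, nargs=2)
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()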
Code example #7
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    while not os.path.exists(cfg.TEST.WEIGHTS) and args.wait:
        logger.info('Waiting for \'{}\' to exist...'.format(cfg.TEST.WEIGHTS))
        time.sleep(10)

    all_results = run_inference(cfg.TEST.WEIGHTS,
                                ind_range=args.range,
                                multi_gpu_testing=args.multi_gpu_testing,
                                check_expected_results=False,
                                evaluation=False)
    all_boxes = all_results['all_boxes']

    test_dataset = JsonDataset(cfg.TEST.DATASETS[0])

    image_set = test_dataset.name.split('_')[-1]
    root_path = DATASETS[test_dataset.name][ROOT_DIR]
    image_set_path = os.path.join(root_path, 'ImageSets', 'Main',
                                  image_set + '.txt')
    with open(image_set_path, 'r') as f:
        image_index = [x.strip() for x in f.readlines()]

    test_roidb = test_dataset.get_roidb()
    for i, entry in enumerate(test_roidb):
Code example #8
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    # output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    output_dir = '/mnt/fcav/self_training/object_detection/upperbound1/train/voc_GTA_caronly_train:cityscapes_caronly_train:voc_GTA_caronly_val/generalized_rcnn'
    files = os.listdir(output_dir)
    for f in files:
        iter_string = re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)
        if len(iter_string) > 0:
            checkpoint_iter = int(iter_string[0])
            start_iter = checkpoint_iter + 1
            resume_weights_file = f

    # Override the initialization weights with the found checkpoint
    weights_file = os.path.join(output_dir, resume_weights_file)
    logger.info(
        '========> Resuming from checkpoint {} at start iter {}'.format(
            weights_file, start_iter))

    run_inference(
        weights_file,
        ind_range=args.range,
        multi_gpu_testing=args.multi_gpu_testing,
        check_expected_results=True,
    )
Code example #9
File: Test.py  Project: gbegkas/Detectron
    models = []
    afroc_scores = []
    for i in res.keys():
        afroc_scores.append(res[i][dataset]['box']['AFROC'])
        models.append(i)
    for model in model_list_keys:
        if args.last_model >= model >= args.first_model and model > last_model_from_json:
            count += 1
            if count % args.samples != 0:
                continue
            logger.info('Model to test:')
            logger.info(model)
            workspace.ResetWorkspace()
            results, auc_score, afroc_score = run_inference(
                model_list[model],
                ind_range=args.range,
                multi_gpu_testing=args.multi_gpu_testing,
                check_expected_results=True,
            )

            res[model] = results

            with open(
                    os.path.join(
                        cfg.OUTPUT_DIR,
                        'RESULTS_{model}_{dataset}_{iter}.json'.format(
                            model=backbone,
                            dataset=cfg.TEST.DATASETS[0],
                            iter=cfg.SOLVER['MAX_ITER'])), 'w') as fp:
                json.dump(res,
                          fp,
                          indent=1,