Beispiel #1
0
def main(args):
    """Build the frozen config, restore a trained model, and run the test loop."""
    # Configuration: file -> CLI overrides -> derived fields, then freeze.
    cfg = get_cfg()
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    cfg = infer_cfg(cfg)
    cfg.freeze()

    if not os.path.isdir(cfg.CKPT):
        mkdir_p(cfg.CKPT)
    setup_logging(cfg.CKPT)

    # Model complexity statistics (computed on the main process only;
    # other ranks keep the zero defaults).
    n_params = conv_flops = model_flops = conv_activs = model_activs = 0
    if is_main_process() and cfg.MODEL_ANALYSE:
        probe = Generalized_CNN(cfg)
        probe.eval()
        analyser = Analyser(cfg, probe, param_details=False)
        n_params = analyser.get_params()[1]
        scale_h, scale_w = cfg.TEST.SCALE[0], cfg.TEST.SCALE[1]
        conv_flops, model_flops = analyser.get_flops_activs(
            scale_h, scale_w, mode='flops')
        conv_activs, model_activs = analyser.get_flops_activs(
            scale_h, scale_w, mode='activations')
        # Release the throwaway analysis model before building the real one.
        del probe

    synchronize()
    # Build the model actually used for testing and restore its weights.
    model = Generalized_CNN(cfg)
    logging_rank(model)

    test_weights = get_weights(cfg.CKPT, cfg.TEST.WEIGHTS)
    load_weights(model, test_weights)
    logging_rank('Params: {} | FLOPs: {:.4f}M / Conv_FLOPs: {:.4f}M | '
                 'ACTIVATIONs: {:.4f}M / Conv_ACTIVATIONs: {:.4f}M'.format(
                     n_params, model_flops, conv_flops, model_activs,
                     conv_activs))

    model.eval()
    model.to(torch.device(cfg.DEVICE))

    # Evaluation data pipeline.
    datasets = build_dataset(cfg, is_train=False)
    test_loader = make_test_data_loader(cfg, datasets)
    synchronize()

    # Hooks + engine, then run the test.
    all_hooks = build_test_hooks(
        args.cfg_file.split('/')[-1], log_period=1, num_warmup=0)
    test_engine = TestEngine(cfg, model)
    test(cfg, test_engine, test_loader, datasets, all_hooks)
Beispiel #2
0
def inference(model, data_loader, logger, device="cuda"):
    """Run the model over the loader and return merged predictions on rank 0.

    Every rank computes its shard and participates in the gather; only the
    main process receives the merged result — all other ranks return None.
    """
    local_preds = compute_on_dataset(model, data_loader, device)
    synchronize()
    merged = _accumulate_predictions_from_multiple_gpus(local_preds, logger)
    return merged if is_main_process() else None
Beispiel #3
0
def main():
    """Run inference and dump/visualize predictions for presentation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()

    # The launcher exports WORLD_SIZE; absent means single-process.
    num_gpus = int(os.environ.get("WORLD_SIZE", "1"))
    distributed = num_gpus > 1

    # Initialize the process group before any collective call.
    if distributed:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    if is_main_process() and not os.path.exists(cfg.PRESENT_DIR):
        os.mkdir(cfg.PRESENT_DIR)
    logger = get_logger(
        cfg.DATASET.NAME, cfg.PRESENT_DIR, args.local_rank, 'present_log.txt')

    model = MSPN(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(cfg.MODEL.DEVICE)

    # NOTE(review): hard-coded checkpoint path; loading is silently skipped
    # when the file is absent.
    model_file = "/home/zqr/codes/MSPN/lib/models/mspn_2xstg_coco.pth"
    if os.path.exists(model_file):
        checkpoint = torch.load(
            model_file, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['model'])

    data_loader = get_present_loader(
        cfg, num_gpus, args.local_rank, cfg.INFO_PATH, is_dist=distributed)

    results = inference(model, data_loader, logger, device)
    synchronize()

    # Only the main process holds the merged results; it dumps JSON and
    # renders each prediction over its source image.
    if is_main_process():
        logger.info("Dumping results ...")
        results.sort(
            key=lambda res: (res['image_id'], res['score']), reverse=True)
        results_path = os.path.join(cfg.PRESENT_DIR, 'results.json')
        with open(results_path, 'w') as f:
            json.dump(results, f)
        logger.info("Get all results.")
        for res in results:
            src_img = cv2.imread(
                os.path.join(cfg.IMG_FOLDER, res['image_id']),
                cv2.IMREAD_COLOR)
            rendered = data_loader.ori_dataset.visualize(
                src_img, res['keypoints'], res['score'])
            cv2.imwrite(
                os.path.join(cfg.PRESENT_DIR, res['image_id']), rendered)
Beispiel #4
0
def main():
    """Evaluate a trained MSPN checkpoint on the validation split.

    Loads ``iter-<iter>.pth`` from ``cfg.OUTPUT_DIR``, runs (optionally
    distributed) inference, and on the main process dumps the sorted
    results to JSON and evaluates them.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--iter", "-i", type=int, default=-1)
    args = parser.parse_args()

    # The launcher exports WORLD_SIZE; absent means single-process.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    if is_main_process() and not os.path.exists(cfg.TEST_DIR):
        os.mkdir(cfg.TEST_DIR)
    logger = get_logger(cfg.DATASET.NAME, cfg.TEST_DIR, args.local_rank,
                        'test_log.txt')

    if args.iter == -1:
        # BUGFIX: previously this only logged and then continued, evaluating
        # a model with random weights (iter--1.pth never exists). Bail out.
        logger.info("Please designate one iteration.")
        return

    model = MSPN(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(cfg.MODEL.DEVICE)

    model_file = os.path.join(cfg.OUTPUT_DIR, "iter-{}.pth".format(args.iter))
    if os.path.exists(model_file):
        # map_location keeps tensors on CPU during load; .to() above already
        # placed the module on the configured device.
        state_dict = torch.load(model_file,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(state_dict['model'])
    else:
        # Make the silent skip visible: inference without weights is useless.
        logger.info("Checkpoint not found: {}".format(model_file))

    data_loader = get_test_loader(cfg,
                                  num_gpus,
                                  args.local_rank,
                                  'val',
                                  is_dist=distributed)

    results = inference(model, data_loader, logger, device)
    synchronize()

    # Only the main process holds the merged results.
    if is_main_process():
        logger.info("Dumping results ...")
        results.sort(key=lambda res: (res['image_id'], res['score']),
                     reverse=True)
        results_path = os.path.join(cfg.TEST_DIR, 'results.json')
        with open(results_path, 'w') as f:
            json.dump(results, f)
        logger.info("Get all results.")

        data_loader.ori_dataset.evaluate(results_path)
0
def _distributed_worker(
    local_rank, main_func, num_gpus, dist_url, args
):
    assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
    try:
        dist.init_process_group(
            backend="NCCL", init_method=dist_url, world_size=num_gpus, rank=local_rank
        )
    except Exception as e:
        logger = logging.getLogger(__name__)
        logger.error("Process group URL: {}".format(dist_url))
        raise e
    # synchronize is needed here to prevent a possible timeout after calling init_process_group
    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
    synchronize()
    torch.cuda.set_device(local_rank)

    # Setup the local process group (which contains ranks within the same machine)
    assert _LOCAL_PROCESS_GROUP is None
    ranks = list(range(num_gpus))
    pg = dist.new_group(ranks)
    _LOCAL_PROCESS_GROUP = pg

    main_func(*args)
    # Validation args
    val_opt = ValOptions()
    val_args = val_opt.parse()
    val_args.batchsize = 1
    val_args.thread = 0

    gpu_num = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    train_args.distributed = gpu_num > 1

    # set distributed configs
    if train_args.distributed:
        torch.cuda.set_device(train_args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    # Set logger
    log_output_dir = cfg.TRAIN.LOG_DIR
    if log_output_dir:
        try:
            os.makedirs(log_output_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    logger = setup_distributed_logger("lib", log_output_dir, get_rank(),
                                      cfg.TRAIN.RUN_NAME + '.txt')
    # tensorboard logger
    tblogger = None
    if train_args.use_tfboard and get_rank() == 0:
        from tensorboardX import SummaryWriter