Example #1
def test(args):
    torch.backends.cudnn.benchmark = True
    logger = setup_logger('TEST')
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    map_location = {'cuda:%d' % 0: 'cuda:%d' % 0}

    # Compute RGB stream
    rgb_cfg = get_cfg_defaults()
    rgb_cfg.merge_from_file(args.rgb_config_file)
    rgb_cfg.DATALOADER.TEST_BATCH_SIZE = 16
    rgb_cfg.OUTPUT.DIR = args.output
    rgb_cfg.freeze()

    rgb_model = build_model(rgb_cfg, map_location=map_location).to(device)
    rgb_model.eval()
    checkpointer = CheckPointer(rgb_model, logger=logger)
    checkpointer.load(args.rgb_pretrained, map_location=map_location)

    # inference(rgb_cfg, rgb_model, device)

    # Compute RGBDiff stream
    rgbdiff_cfg = get_cfg_defaults()
    rgbdiff_cfg.merge_from_file(args.rgbdiff_config_file)
    rgbdiff_cfg.DATALOADER.TEST_BATCH_SIZE = 16
    rgbdiff_cfg.OUTPUT.DIR = args.output
    rgbdiff_cfg.freeze()

    rgbdiff_model = build_model(rgbdiff_cfg,
                                map_location=map_location).to(device)
    rgbdiff_model.eval()
    checkpointer = CheckPointer(rgbdiff_model, logger=logger)
    checkpointer.load(args.rgbdiff_pretrained, map_location=map_location)

    inference(rgb_cfg, rgb_model, rgbdiff_cfg, rgbdiff_model, device)
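
The four-argument `inference` call above fuses the RGB and RGBDiff streams; that function is not part of this listing. A minimal sketch of what such a fusion step could look like, assuming both models emit class logits for the same clips and the fused score is a plain average of their softmax outputs (the function name and equal weighting are hypothetical, not taken from the project):

import torch
import torch.nn.functional as F

@torch.no_grad()
def fuse_two_stream(rgb_model, rgbdiff_model, rgb_inputs, rgbdiff_inputs):
    # Hypothetical fusion: average the softmax scores of the two streams,
    # then pick the highest-scoring class per clip.
    rgb_probs = F.softmax(rgb_model(rgb_inputs), dim=1)
    rgbdiff_probs = F.softmax(rgbdiff_model(rgbdiff_inputs), dim=1)
    fused = (rgb_probs + rgbdiff_probs) / 2
    return fused.argmax(dim=1)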
Example #2
File: inference.py  Project: ZJCV/TRN
def inference(cfg, model, device, **kwargs):
    iteration = kwargs.get('iteration', None)
    logger_name = cfg.INFER.NAME
    dataset_name = cfg.DATASETS.TEST.NAME
    output_dir = cfg.OUTPUT.DIR

    data_loader = build_dataloader(cfg, train=False)
    dataset = data_loader.dataset

    logger = setup_logger(logger_name)
    logger.info("Evaluating {} dataset({} video clips):".format(
        dataset_name, len(dataset)))

    results_dict, cate_acc_dict, acc_top1, acc_top5 = compute_on_dataset(
        model, data_loader, device)

    top1_acc = np.mean(acc_top1)
    top5_acc = np.mean(acc_top5)
    result_str = '\ntotal - top_1 acc: {:.3f}, top_5 acc: {:.3f}\n'.format(
        top1_acc, top5_acc)

    classes = dataset.classes
    for key in sorted(results_dict.keys(), key=lambda x: int(x)):
        total_num = results_dict[key]
        acc_num = cate_acc_dict[key]

        cate_name = classes[int(key)]

        if total_num != 0:
            result_str += '{:<3} - {:<20} - acc: {:.2f}\n'.format(
                key, cate_name, acc_num / total_num * 100)
        else:
            result_str += '{:<3} - {:<20} - acc: 0.0\n'.format(
                key, cate_name)
    logger.info(result_str)

    if iteration is not None:
        result_path = os.path.join(output_dir,
                                   'result_{:07d}.txt'.format(iteration))
    else:
        result_path = os.path.join(
            output_dir, 'result_{}.txt'.format(
                datetime.now().strftime('%Y-%m-%d_%H-%M-%S')))
    with open(result_path, "w") as f:
        f.write(result_str)

    for handler in logger.handlers:
        logger.removeHandler(handler)

    return {'top1': top1_acc, 'top5': top5_acc}
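
`compute_on_dataset` comes from elsewhere in the project; from the way its return values are consumed above, it appears to yield per-class clip counts, per-class correct counts, and lists of per-batch top-1/top-5 accuracies. A rough sketch under those assumptions (names and data layout are guesses, not the project's actual implementation):

from collections import defaultdict

import torch

@torch.no_grad()
def compute_on_dataset(model, data_loader, device):
    # Assumed contract: (per-class totals, per-class correct counts,
    # per-batch top-1 accuracies, per-batch top-5 accuracies).
    results_dict = defaultdict(int)
    cate_acc_dict = defaultdict(int)
    acc_top1, acc_top5 = [], []
    for images, targets in data_loader:
        outputs = model(images.to(device))
        targets = targets.to(device)
        _, top5 = outputs.topk(5, dim=1)  # indices sorted by score
        correct = top5.eq(targets.unsqueeze(1))
        acc_top1.append(correct[:, 0].float().mean().item())
        acc_top5.append(correct.any(dim=1).float().mean().item())
        for label, hit in zip(targets.tolist(), correct[:, 0].tolist()):
            results_dict[str(label)] += 1  # keys are stringified class ids
            cate_acc_dict[str(label)] += int(hit)
    return results_dict, cate_acc_dict, acc_top1, acc_top5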
Example #3
File: test.py  Project: ZJCV/SlowFast
def test(cfg):
    torch.backends.cudnn.benchmark = True

    logger = setup_logger('TEST')
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    map_location = {'cuda:%d' % 0: 'cuda:%d' % 0}

    model = build_model(cfg, map_location=map_location).to(device)
    if cfg.MODEL.PRETRAINED != "":
        if logger:
            logger.info(f'load pretrained: {cfg.MODEL.PRETRAINED}')
        checkpointer = CheckPointer(model, logger=logger)
        checkpointer.load(cfg.MODEL.PRETRAINED, map_location=map_location)

    do_evaluation(cfg, model, device)
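
The map_location dict built in these scripts ({'cuda:0': 'cuda:0'}) uses torch.load's mapping form, which remaps the device tags stored in the checkpoint at load time; presumably CheckPointer forwards it to torch.load. A standalone illustration of that PyTorch behaviour (the checkpoint path is a placeholder):

import torch

# Tensors saved from 'cuda:0' are loaded back onto 'cuda:0' (identity mapping);
# replacing the value with 'cuda:1' or 'cpu' would relocate them instead.
state_dict = torch.load('checkpoint.pth', map_location={'cuda:0': 'cuda:0'})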
Example #4
def main():
    args = parse_train_args()
    cfg = load_config(args)

    logger = setup_logger("TSN", save_dir=cfg.OUTPUT.DIR)
    logger.info(args)

    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Loaded configuration file {}".format(args.config_file))
    if args.config_file:
        with open(args.config_file, "r") as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    launch_job(args, cfg, train)
Example #5
def train(gpu, args, cfg):
    rank = args.nr * args.gpus + gpu
    setup(rank, args.world_size)

    logger = setup_logger(cfg.TRAIN.NAME)
    arguments = {"iteration": 0}

    torch.cuda.set_device(gpu)
    device = torch.device(f'cuda:{gpu}' if torch.cuda.is_available() else 'cpu')
    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}

    model = build_model(cfg, gpu, map_location=map_location)
    criterion = build_criterion(cfg)
    optimizer = build_optimizer(cfg, model)
    lr_scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = CheckPointer(model, optimizer=optimizer, scheduler=lr_scheduler, save_dir=cfg.OUTPUT.DIR,
                                save_to_disk=True, logger=logger)
    if args.resume:
        if is_master_proc():
            logger.info('resume ...')
        extra_checkpoint_data = checkpointer.load(map_location=map_location, rank=rank)
        if extra_checkpoint_data != dict():
            arguments['iteration'] = extra_checkpoint_data['iteration']
            if cfg.LR_SCHEDULER.IS_WARMUP:
                if is_master_proc():
                    logger.info('warmup ...')
                if lr_scheduler.finished:
                    optimizer.load_state_dict(lr_scheduler.after_scheduler.optimizer.state_dict())
                else:
                    optimizer.load_state_dict(lr_scheduler.optimizer.state_dict())
                lr_scheduler.optimizer = optimizer
                lr_scheduler.after_scheduler.optimizer = optimizer

    data_loader = build_dataloader(cfg, is_train=True, start_iter=arguments['iteration'])

    synchronize()
    do_train(args, cfg, arguments,
             data_loader, model, criterion, optimizer, lr_scheduler,
             checkpointer, device, logger)
    cleanup()
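
setup, synchronize, and cleanup are project helpers not shown in this listing; they presumably wrap torch.distributed. A minimal sketch of what such helpers commonly look like, assuming the NCCL backend and the MASTER_ADDR/MASTER_PORT variables set by the launcher in Example #8 (a guess at the helpers, not the project's code):

import torch.distributed as dist

def setup(rank, world_size):
    # Join the default process group; MASTER_ADDR / MASTER_PORT must already be set.
    dist.init_process_group(backend='nccl', rank=rank, world_size=world_size)

def synchronize():
    # Block until every rank reaches this point.
    if dist.is_available() and dist.is_initialized():
        dist.barrier()

def cleanup():
    dist.destroy_process_group()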
Example #6
def main():
    parser = argparse.ArgumentParser(description='TSN Test With PyTorch')
    parser.add_argument("rgb_config_file",
                        default="",
                        metavar="RGB_CONFIG_FILE",
                        help="path to config file",
                        type=str)
    parser.add_argument('rgb_pretrained',
                        default="",
                        metavar='RGB_PRETRAINED_FILE',
                        help="path to pretrained model",
                        type=str)
    parser.add_argument("rgbdiff_config_file",
                        default="",
                        metavar="RGBDIFF_CONFIG_FILE",
                        help="path to config file",
                        type=str)
    parser.add_argument('rgbdiff_pretrained',
                        default="",
                        metavar='RGBDIFF_PRETRAINED_FILE',
                        help="path to pretrained model",
                        type=str)
    parser.add_argument('--output', default="./outputs/test", type=str)
    args = parser.parse_args()

    if not os.path.isfile(args.rgb_config_file) or not os.path.isfile(
            args.rgb_pretrained):
        raise ValueError('The RGB config file and pretrained model path are required')
    if not os.path.isfile(args.rgbdiff_config_file) or not os.path.isfile(
            args.rgbdiff_pretrained):
        raise ValueError('The RGBDiff config file and pretrained model path are required')

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    logger = setup_logger("TSN", save_dir=args.output)
    logger.info(args)
    logger.info("Environment info:\n" + collect_env_info())

    test(args)
Example #7
File: test.py  Project: ZJCV/SlowFast
def main():
    parser = argparse.ArgumentParser(description='TSN Test With PyTorch')
    parser.add_argument("config_file", default="", metavar="CONFIG_FILE",
                        help="path to config file", type=str)
    parser.add_argument('pretrained', default="", metavar='PRETRAINED_FILE',
                        help="path to pretrained model", type=str)
    parser.add_argument('--output', default="./outputs/test", type=str)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    if not os.path.isfile(args.config_file) or not os.path.isfile(args.pretrained):
        raise ValueError('The config file and pretrained model path are required')

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.MODEL.PRETRAINED = args.pretrained
    cfg.OUTPUT.DIR = args.output
    cfg.freeze()

    if not os.path.exists(cfg.OUTPUT.DIR):
        os.makedirs(cfg.OUTPUT.DIR)
    logger = setup_logger("TSN", save_dir=cfg.OUTPUT.DIR)
    logger.info(args)

    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Loaded configuration file {}".format(args.config_file))
    if args.config_file:
        with open(args.config_file, "r") as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    test(cfg)
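
The trailing opts argument (argparse.REMAINDER) is handed to cfg.merge_from_list, which matches yacs' CfgNode API (as do merge_from_file and freeze). A small self-contained illustration of that override mechanism, assuming yacs is indeed the config library (the key name is illustrative):

from yacs.config import CfgNode as CN

cfg = CN()
cfg.DATALOADER = CN()
cfg.DATALOADER.TEST_BATCH_SIZE = 16
# Equivalent of appending "DATALOADER.TEST_BATCH_SIZE 32" after the positional args:
cfg.merge_from_list(['DATALOADER.TEST_BATCH_SIZE', '32'])
assert cfg.DATALOADER.TEST_BATCH_SIZE == 32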
Example #8
File: train.py  Project: ZJCV/TRN
def main():
    parser = argparse.ArgumentParser(description='TSN Training With PyTorch')
    parser.add_argument("--config_file",
                        default="",
                        metavar="FILE",
                        help="path to config file",
                        type=str)
    parser.add_argument('--log_step',
                        default=10,
                        type=int,
                        help='Print logs every log_step')
    parser.add_argument('--save_step',
                        default=2500,
                        type=int,
                        help='Save checkpoint every save_step')
    parser.add_argument('--stop_save', default=False, action='store_true')
    parser.add_argument(
        '--eval_step',
        default=2500,
        type=int,
        help='Evaluate dataset every eval_step, disabled when eval_step < 0')
    parser.add_argument('--stop_eval', default=False, action='store_true')
    parser.add_argument('--resume',
                        default=False,
                        action='store_true',
                        help='Resume training')
    parser.add_argument('--use_tensorboard', default=1, type=int)

    parser.add_argument('-n',
                        '--nodes',
                        default=1,
                        type=int,
                        metavar='N',
                        help='number of machines (default: 1)')
    parser.add_argument('-g',
                        '--gpus',
                        default=1,
                        type=int,
                        help='number of gpus per node')
    parser.add_argument('-nr',
                        '--nr',
                        default=0,
                        type=int,
                        help='ranking within the nodes')

    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if not os.path.exists(cfg.OUTPUT.DIR):
        os.makedirs(cfg.OUTPUT.DIR)
    logger = setup_logger("TSN", save_dir=cfg.OUTPUT.DIR)
    logger.info(args)

    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Loaded configuration file {}".format(args.config_file))
    if args.config_file:
        with open(args.config_file, "r") as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    args.world_size = args.gpus * args.nodes
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '14028'
    mp.spawn(train, nprocs=args.gpus, args=(args, cfg))
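
torch.multiprocessing.spawn passes the process index as the first positional argument to the target function, which is why train(gpu, args, cfg) in the next example receives gpu first while args=(args, cfg) supplies the rest. A tiny standalone illustration of that calling convention (worker name and arguments are placeholders):

import torch.multiprocessing as mp

def worker(local_rank, tag):
    # local_rank is injected by mp.spawn; tag comes from the args tuple below.
    print(f'worker {local_rank} got {tag}')

if __name__ == '__main__':
    mp.spawn(worker, nprocs=2, args=('demo',))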
Example #9
File: train.py  Project: ZJCV/TRN
def train(gpu, args, cfg):
    rank = args.nr * args.gpus + gpu
    setup(rank, args.world_size, args.gpus)

    logger = setup_logger(cfg.TRAIN.NAME)
    arguments = {"iteration": 0}
    arguments['rank'] = rank

    device = torch.device(
        f'cuda:{gpu}' if torch.cuda.is_available() else 'cpu')
    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
    model = build_model(cfg, map_location=map_location).to(device)
    if cfg.MODEL.PRETRAINED != "":
        if rank == 0 and logger:
            logger.info(f'load pretrained: {cfg.MODEL.PRETRAINED}')
        checkpointer = CheckPointer(model, logger=logger)
        checkpointer.load(cfg.MODEL.PRETRAINED,
                          map_location=map_location,
                          rank=rank)

    if args.gpus > 1:
        model = DDP(model, device_ids=[gpu], find_unused_parameters=True)
    criterion = build_criterion(cfg)
    optimizer = build_optimizer(cfg, model)
    lr_scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = CheckPointer(model,
                                optimizer=optimizer,
                                scheduler=lr_scheduler,
                                save_dir=cfg.OUTPUT.DIR,
                                save_to_disk=True,
                                logger=logger)
    if args.resume:
        if rank == 0:
            logger.info('resume ...')
        extra_checkpoint_data = checkpointer.load(map_location=map_location,
                                                  rank=rank)
        if extra_checkpoint_data != dict():
            arguments['iteration'] = extra_checkpoint_data['iteration']
            if cfg.LR_SCHEDULER.WARMUP:
                if rank == 0:
                    logger.info('warmup ...')
                if lr_scheduler.finished:
                    optimizer.load_state_dict(
                        lr_scheduler.after_scheduler.optimizer.state_dict())
                else:
                    optimizer.load_state_dict(
                        lr_scheduler.optimizer.state_dict())
                lr_scheduler.optimizer = optimizer
                lr_scheduler.after_scheduler.optimizer = optimizer

    data_loader = build_dataloader(cfg,
                                   train=True,
                                   start_iter=arguments['iteration'],
                                   world_size=args.world_size,
                                   rank=rank)

    model = do_train(args, cfg, arguments, data_loader, model, criterion,
                     optimizer, lr_scheduler, checkpointer, device, logger)

    if rank == 0 and not args.stop_eval:
        logger.info('Start final evaluating...')
        torch.cuda.empty_cache()  # speed up evaluating after training finished
        do_evaluation(cfg, model, device)

    cleanup()