Example No. 1
0
def test(args):
    """Evaluate an RGB model and an RGBDiff model jointly.

    Loads both configurations, restores each model from its pretrained
    checkpoint, and runs the combined two-stream inference.

    Args:
        args: parsed command-line arguments providing rgb_config_file,
            rgbdiff_config_file, rgb_pretrained, rgbdiff_pretrained and
            output (checkpoint/result directory).
    """
    torch.backends.cudnn.benchmark = True
    logger = logging.setup_logging()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Remap checkpoint tensors saved on cuda:0 back onto cuda:0.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % 0}

    # Build and restore the RGB stream.
    rgb_cfg = get_cfg_defaults()
    rgb_cfg.merge_from_file(args.rgb_config_file)
    rgb_cfg.DATALOADER.TEST_BATCH_SIZE = 16
    rgb_cfg.OUTPUT.DIR = args.output
    rgb_cfg.freeze()

    rgb_model = build_model(rgb_cfg, 0)
    rgb_model.eval()
    rgb_checkpointer = CheckPointer(rgb_model, logger=logger)
    rgb_checkpointer.load(args.rgb_pretrained, map_location=map_location)

    # Build and restore the RGBDiff stream.
    rgbdiff_cfg = get_cfg_defaults()
    rgbdiff_cfg.merge_from_file(args.rgbdiff_config_file)
    rgbdiff_cfg.DATALOADER.TEST_BATCH_SIZE = 16
    rgbdiff_cfg.OUTPUT.DIR = args.output
    rgbdiff_cfg.freeze()

    rgbdiff_model = build_model(rgbdiff_cfg, 0)
    rgbdiff_model.eval()
    rgbdiff_checkpointer = CheckPointer(rgbdiff_model, logger=logger)
    rgbdiff_checkpointer.load(args.rgbdiff_pretrained, map_location=map_location)

    inference(rgb_cfg, rgb_model, rgbdiff_cfg, rgbdiff_model, device)
Example No. 2
0
File: test.py Project: ZJCV/X3D
def test(cfg):
    """Run distributed evaluation for the given configuration."""
    # Prepare the distributed environment before anything touches CUDA.
    init_distributed_training(cfg)

    # Seed every RNG from the config so evaluation is reproducible,
    # and let cuDNN pick the fastest kernels.
    seed = cfg.RNG_SEED
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True

    # Build the model on this process's device.
    local_device = get_device(local_rank=get_local_rank())
    model = build_model(cfg, device=local_device)

    # Wait for all workers, then evaluate.
    synchronize()
    do_evaluation(cfg, model, local_device)
Example No. 3
0
def test(cfg):
    """Build a model from cfg, optionally load pretrained weights, evaluate.

    Args:
        cfg: frozen config node; MODEL.PRETRAINED (path, may be "") selects
            the checkpoint to restore before evaluation.
    """
    torch.backends.cudnn.benchmark = True

    logger = setup_logger('TEST')
    # Plain string: there is no placeholder to format here.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Remap checkpoint tensors saved on cuda:0 back onto cuda:0.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % 0}

    model = build_model(cfg, map_location=map_location).to(device)
    if cfg.MODEL.PRETRAINED != "":
        if logger:
            logger.info(f'load pretrained: {cfg.MODEL.PRETRAINED}')
        checkpointer = CheckPointer(model, logger=logger)
        checkpointer.load(cfg.MODEL.PRETRAINED, map_location=map_location)

    do_evaluation(cfg, model, device)
Example No. 4
0
    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                tsn/config/defaults.py
            gpu_id (Optional[int]): GPU id.
        """
        # Pick the device for this process: the local-rank GPU when GPUs
        # are configured, otherwise whatever get_device() defaults to.
        target_device = (get_device(local_rank=get_local_rank())
                         if cfg.NUM_GPUS > 0 else get_device())

        # Build the video model in inference mode and the eval-time
        # preprocessing pipeline.
        self.model = build_model(cfg, target_device)
        self.model.eval()
        self.transform = build_transform(cfg, is_train=False)

        self.cfg = cfg
        self.device = target_device
Example No. 5
0
def main():
    """Run live webcam/video action recognition with display and inference threads.

    Shares state with the worker threads (show_results / inference) through
    module-level globals, as declared below.
    """
    global frame_queue, camera, frame, results, threshold, sample_length, \
        data, test_transform, model, device, average_size, label, result_queue, \
        frame_interval

    args = parse_test_args()
    cfg = load_test_config(args)
    average_size = 1
    threshold = 0.5

    # Seed RNGs for reproducibility; let cuDNN autotune kernels.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True

    device = get_device(local_rank=get_local_rank())
    model = build_model(cfg, device)
    model.eval()
    camera = cv2.VideoCapture(cfg.VISUALIZATION.INPUT_VIDEO)

    # Label file format: "<index> <name>" per line; keep only the name.
    with open(cfg.VISUALIZATION.LABEL_FILE_PATH, 'r') as f:
        label = [line.strip().split(' ')[1] for line in f]

    # prepare test pipeline from non-camera pipeline
    test_transform = build_transform(cfg, is_train=False)
    sample_length = cfg.DATASETS.CLIP_LEN * cfg.DATASETS.NUM_CLIPS * cfg.DATASETS.FRAME_INTERVAL
    frame_interval = cfg.DATASETS.FRAME_INTERVAL

    assert sample_length > 0

    try:
        frame_queue = deque(maxlen=sample_length)
        result_queue = deque(maxlen=1)
        pw = Thread(target=show_results, args=(), daemon=True)
        pr = Thread(target=inference, args=(), daemon=True)
        pw.start()
        pr.start()
        # Block until the display thread finishes instead of busy-spinning
        # on is_alive(); KeyboardInterrupt still interrupts the join.
        pw.join()
        exit(0)
    except KeyboardInterrupt:
        pass
Example No. 6
0
def train(gpu, args, cfg):
    """Per-process distributed training entry point.

    Args:
        gpu (int): local GPU index on this node.
        args: launcher arguments; uses nr (node rank), gpus (GPUs per node),
            world_size and resume.
        cfg: frozen config node driving model/optimizer/dataloader builds.
    """
    # Global rank = node index * GPUs-per-node + local GPU index.
    rank = args.nr * args.gpus + gpu
    setup(rank, args.world_size)

    logger = setup_logger(cfg.TRAIN.NAME)
    arguments = {"iteration": 0}

    torch.cuda.set_device(gpu)
    device = torch.device(f'cuda:{gpu}' if torch.cuda.is_available() else 'cpu')
    # Remap checkpoint tensors saved on cuda:0 onto this process's rank device.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}

    model = build_model(cfg, gpu, map_location=map_location)
    criterion = build_criterion(cfg)
    optimizer = build_optimizer(cfg, model)
    lr_scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = CheckPointer(model, optimizer=optimizer, scheduler=lr_scheduler, save_dir=cfg.OUTPUT.DIR,
                                save_to_disk=True, logger=logger)
    if args.resume:
        if is_master_proc():
            logger.info('resume ...')
        # Restores model/optimizer/scheduler state; extra data carries the
        # iteration counter to resume the dataloader from.
        extra_checkpoint_data = checkpointer.load(map_location=map_location, rank=rank)
        if extra_checkpoint_data != dict():
            arguments['iteration'] = extra_checkpoint_data['iteration']
            if cfg.LR_SCHEDULER.IS_WARMUP:
                if is_master_proc():
                    logger.info('warmup ...')
                # Re-sync the freshly built optimizer with whichever scheduler
                # phase the checkpoint was in (post- or mid-warmup), then
                # rebind both scheduler levels to that optimizer instance.
                if lr_scheduler.finished:
                    optimizer.load_state_dict(lr_scheduler.after_scheduler.optimizer.state_dict())
                else:
                    optimizer.load_state_dict(lr_scheduler.optimizer.state_dict())
                lr_scheduler.optimizer = optimizer
                lr_scheduler.after_scheduler.optimizer = optimizer

    # Start the dataloader at the resumed iteration so epochs line up.
    data_loader = build_dataloader(cfg, is_train=True, start_iter=arguments['iteration'])

    synchronize()
    do_train(args, cfg, arguments,
             data_loader, model, criterion, optimizer, lr_scheduler,
             checkpointer, device, logger)
    cleanup()
Example No. 7
0
File: train.py Project: ZJCV/TRN
def train(gpu, args, cfg):
    """Per-process distributed training entry point with final evaluation.

    Args:
        gpu (int): local GPU index on this node.
        args: launcher arguments; uses nr (node rank), gpus (GPUs per node),
            world_size, resume and stop_eval.
        cfg: frozen config node driving model/optimizer/dataloader builds.
    """
    # Global rank = node index * GPUs-per-node + local GPU index.
    rank = args.nr * args.gpus + gpu
    setup(rank, args.world_size, args.gpus)

    logger = setup_logger(cfg.TRAIN.NAME)
    arguments = {"iteration": 0}
    arguments['rank'] = rank

    device = torch.device(
        f'cuda:{gpu}' if torch.cuda.is_available() else 'cpu')
    # Remap checkpoint tensors saved on cuda:0 onto this process's rank device.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
    model = build_model(cfg, map_location=map_location).to(device)
    if cfg.MODEL.PRETRAINED != "":
        # Only rank 0 logs, to avoid duplicate messages across processes.
        if rank == 0 and logger:
            logger.info(f'load pretrained: {cfg.MODEL.PRETRAINED}')
        checkpointer = CheckPointer(model, logger=logger)
        checkpointer.load(cfg.MODEL.PRETRAINED,
                          map_location=map_location,
                          rank=rank)

    # Wrap in DDP only for true multi-GPU runs.
    if args.gpus > 1:
        model = DDP(model, device_ids=[gpu], find_unused_parameters=True)
    criterion = build_criterion(cfg)
    optimizer = build_optimizer(cfg, model)
    lr_scheduler = build_lr_scheduler(cfg, optimizer)

    # Second checkpointer tracks full training state (model + optimizer +
    # scheduler) for resume, unlike the pretrained-weights loader above.
    checkpointer = CheckPointer(model,
                                optimizer=optimizer,
                                scheduler=lr_scheduler,
                                save_dir=cfg.OUTPUT.DIR,
                                save_to_disk=True,
                                logger=logger)
    if args.resume:
        if rank == 0:
            logger.info('resume ...')
        extra_checkpoint_data = checkpointer.load(map_location=map_location,
                                                  rank=rank)
        if extra_checkpoint_data != dict():
            arguments['iteration'] = extra_checkpoint_data['iteration']
            if cfg.LR_SCHEDULER.WARMUP:
                if rank == 0:
                    logger.info('warmup ...')
                # Re-sync the freshly built optimizer with whichever scheduler
                # phase the checkpoint was in (post- or mid-warmup), then
                # rebind both scheduler levels to that optimizer instance.
                if lr_scheduler.finished:
                    optimizer.load_state_dict(
                        lr_scheduler.after_scheduler.optimizer.state_dict())
                else:
                    optimizer.load_state_dict(
                        lr_scheduler.optimizer.state_dict())
                lr_scheduler.optimizer = optimizer
                lr_scheduler.after_scheduler.optimizer = optimizer

    # Start the dataloader at the resumed iteration so epochs line up.
    data_loader = build_dataloader(cfg,
                                   train=True,
                                   start_iter=arguments['iteration'],
                                   world_size=args.world_size,
                                   rank=rank)

    model = do_train(args, cfg, arguments, data_loader, model, criterion,
                     optimizer, lr_scheduler, checkpointer, device, logger)

    # Final evaluation on rank 0 only, unless explicitly disabled.
    if rank == 0 and not args.stop_eval:
        logger.info('Start final evaluating...')
        torch.cuda.empty_cache()  # speed up evaluating after training finished
        do_evaluation(cfg, model, device)

    cleanup()