# Example #1
def main():
    """Parse CLI args, assemble the config, then build and run a re-id engine."""
    # Read command-line arguments.
    args = get_args()

    # Build the configuration: defaults, optional YAML file, then CLI overrides.
    config = get_default_config()
    config.use_gpu = torch.cuda.is_available()
    if args.config_file:
        config.merge_from_file(args.config_file)
    reset_config(config, args)
    config.merge_from_list(args.opts)
    set_random_seed(config.train.seed)

    # Mirror stdout into a timestamped log file under the save directory.
    prefix = 'test.log' if config.test.evaluate else 'train.log'
    logfile = prefix + time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(config.data.save_dir, logfile))

    print('Show configuration\n{}\n'.format(config))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if config.use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True

    # The data manager supplies loaders and the number of training identities.
    datamanager = build_datamanager(config)

    print('Building model: {}'.format(config.model.name))
    model = torchreid.models.build_model(
        name=config.model.name,
        num_classes=datamanager.num_train_pids,
        loss=config.loss.name,
        pretrained=config.model.pretrained,
        use_gpu=config.use_gpu,
    )
    num_params, flops = compute_model_complexity(
        model, (1, 3, config.data.height, config.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    # Optionally initialize from pretrained weights (skipped if file missing).
    if config.model.load_weights and check_isfile(config.model.load_weights):
        load_pretrained_weights(model, config.model.load_weights)

    if config.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(
        model, **optimizer_kwargs(config))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, **lr_scheduler_kwargs(config))

    # Resuming restores model/optimizer/scheduler state and the start epoch.
    if config.model.resume and check_isfile(config.model.resume):
        config.train.start_epoch = resume_from_checkpoint(
            config.model.resume, model, optimizer=optimizer,
            scheduler=scheduler)

    print('Building {}-engine for {}-reid'.format(config.loss.name,
                                                  config.data.type))

    # Build engine and run
    engine = build_engine(config, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(config))
# Example #2
def main():
    """Train or evaluate a re-id model driven by the module-level ``args`` namespace."""
    global args

    set_random_seed(args.seed)
    # Pin CUDA to the requested devices unless all available GPUs may be used.
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available() and not args.use_cpu
    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'test.log' if args.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    # Dump all arguments (sorted by key) for reproducibility.
    print('** Arguments **')
    arg_keys = list(args.__dict__.keys())
    arg_keys.sort()
    for key in arg_keys:
        print('{}: {}'.format(key, args.__dict__[key]))
    print('\n')
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    if use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True
    else:
        warnings.warn(
            'Currently using CPU, however, GPU is highly recommended')

    datamanager = build_datamanager(args)

    print('Building model: {}'.format(args.arch))
    model = torchreid.models.build_model(
        name=args.arch,
        num_classes=datamanager.num_train_pids,
        loss=args.loss.lower(),
        pretrained=(not args.no_pretrained),
        use_gpu=use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, args.height, args.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    # Optionally initialize from pretrained weights (skipped if file missing).
    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(args))

    scheduler = torchreid.optim.build_lr_scheduler(optimizer,
                                                   **lr_scheduler_kwargs(args))

    # NOTE(review): resume restores the optimizer but not the scheduler state
    # here, even though a scheduler is built above — confirm this is intended.
    if args.resume and check_isfile(args.resume):
        args.start_epoch = resume_from_checkpoint(args.resume,
                                                  model,
                                                  optimizer=optimizer)

    print('Building {}-engine for {}-reid'.format(args.loss, args.app))
    engine = build_engine(args, datamanager, model, optimizer, scheduler)

    engine.run(**engine_run_kwargs(args))
# Example #3
def main():
    """Parse a YACS-style config from CLI, then train or evaluate a re-id model."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', help='path to config file')
    parser.add_argument('-s', '--sources', type=str, nargs='+', help='source datasets (delimited by space)')
    parser.add_argument('-t', '--targets', type=str, nargs='+', help='target datasets (delimited by space)')
    parser.add_argument('--root', type=str, default='', help='path to data root')
    # REMAINDER collects trailing KEY VALUE pairs for cfg.merge_from_list below.
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    # Config precedence: defaults < YAML file < args < trailing CLI opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, datamanager.num_train_pids))
    num_params, flops = compute_model_complexity(model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        # NOTE(review): when pretrained and training, the raw state dict is
        # loaded via the model's own method; otherwise the generic helper is
        # used — confirm both paths expect the same checkpoint layout.
        if cfg.model.pretrained and not cfg.test.evaluate:
            state_dict = torch.load(cfg.model.load_weights)
            model.load_pretrained_weights(state_dict)
        else:
            load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    # Resuming restores model/optimizer/scheduler state and the start epoch.
    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
        )

    print('Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
# Example #4
def main():
    """Configure the run from the global CLI args, then train or test a re-id model."""
    global args

    set_random_seed(args.seed)
    # Restrict visible GPUs unless the user asked to use whatever is available.
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    gpu_ok = (torch.cuda.is_available() and not args.use_cpu)

    # Mirror stdout into a log file under the save directory.
    logfile = 'test.log' if args.evaluate else 'train.log'
    sys.stdout = Logger(osp.join(args.save_dir, logfile))
    print('==========\nArgs:{}\n=========='.format(args))

    if gpu_ok:
        print('Currently using GPU {}'.format(args.gpu_devices))
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True
    else:
        warnings.warn(
            'Currently using CPU, however, GPU is highly recommended')

    datamanager = build_datamanager(args)

    print('Building model: {}'.format(args.arch))
    model = torchreid.models.build_model(
        name=args.arch,
        num_classes=datamanager.num_train_pids,
        loss=args.loss.lower(),
        pretrained=(not args.no_pretrained),
        use_gpu=gpu_ok,
    )
    num_params, flops = compute_model_complexity(
        model, (1, 3, args.height, args.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    # Optionally initialize from pretrained weights (skipped if file missing).
    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    if gpu_ok:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(
        model, **optimizer_kwargs(args))

    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, **lr_scheduler_kwargs(args))

    # Resuming restores model/optimizer state and the epoch to continue from.
    if args.resume and check_isfile(args.resume):
        args.start_epoch = resume_from_checkpoint(
            args.resume, model, optimizer=optimizer)

    print('Building {}-engine for {}-reid'.format(args.loss, args.app))
    engine = build_engine(args, datamanager, model, optimizer, scheduler)

    engine.run(**engine_run_kwargs(args))
# Example #5
def main():
    """Run neural-architecture-search training with the ImageSoftmaxNASEngine."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '--config-file', type=str, default='', help='path to config file'
    )
    parser.add_argument(
        '-s',
        '--sources',
        type=str,
        nargs='+',
        help='source datasets (delimited by space)'
    )
    parser.add_argument(
        '-t',
        '--targets',
        type=str,
        nargs='+',
        help='target datasets (delimited by space)'
    )
    parser.add_argument(
        '--transforms', type=str, nargs='+', help='data augmentation'
    )
    parser.add_argument(
        '--root', type=str, default='', help='path to data root'
    )
    parser.add_argument(
        '--gpu-devices',
        type=str,
        default='',
    )
    # REMAINDER collects trailing KEY VALUE pairs for cfg.merge_from_list below.
    parser.add_argument(
        'opts',
        default=None,
        nargs=argparse.REMAINDER,
        help='Modify config options using the command-line'
    )
    args = parser.parse_args()

    # Config precedence: defaults < YAML file < args < trailing CLI opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    if cfg.use_gpu and args.gpu_devices:
        # if gpu_devices is not specified, all available gpus will be used
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True

    datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))

    print('Building model: {}'.format(cfg.model.name))
    # NAS search space model comes from osnet_models, not torchreid.models.
    model = osnet_models.build_model(
        cfg.model.name, num_classes=datamanager.num_train_pids
    )
    num_params, flops = compute_model_complexity(
        model, (1, 3, cfg.data.height, cfg.data.width)
    )
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, **lr_scheduler_kwargs(cfg)
    )

    # Resuming restores model/optimizer state and the epoch to continue from.
    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer
        )

    print('Building NAS engine')
    # Lambda (lmda) parameters control the NAS temperature/annealing schedule;
    # their exact semantics live in ImageSoftmaxNASEngine.
    engine = ImageSoftmaxNASEngine(
        datamanager,
        model,
        optimizer,
        scheduler=scheduler,
        use_gpu=cfg.use_gpu,
        label_smooth=cfg.loss.softmax.label_smooth,
        mc_iter=cfg.nas.mc_iter,
        init_lmda=cfg.nas.init_lmda,
        min_lmda=cfg.nas.min_lmda,
        lmda_decay_step=cfg.nas.lmda_decay_step,
        lmda_decay_rate=cfg.nas.lmda_decay_rate,
        fixed_lmda=cfg.nas.fixed_lmda
    )
    engine.run(**engine_run_kwargs(cfg))

    print('*** Display the found architecture ***')
    # DataParallel wraps the model, so the real module sits under .module.
    if cfg.use_gpu:
        model.module.build_child_graph()
    else:
        model.build_child_graph()
def main():
    """Train two identical models jointly with deep mutual learning (DML)."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='path to config file')
    parser.add_argument('-s',
                        '--sources',
                        type=str,
                        nargs='+',
                        help='source datasets (delimited by space)')
    parser.add_argument('-t',
                        '--targets',
                        type=str,
                        nargs='+',
                        help='target datasets (delimited by space)')
    parser.add_argument('--transforms',
                        type=str,
                        nargs='+',
                        help='data augmentation')
    parser.add_argument('--root',
                        type=str,
                        default='',
                        help='path to data root')
    # REMAINDER collects trailing KEY VALUE pairs for cfg.merge_from_list below.
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    # Config precedence: defaults < YAML file < args < trailing CLI opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True

    datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))

    print('Building model-1: {}'.format(cfg.model.name))
    model1 = torchreid.models.build_model(
        name=cfg.model.name,
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu)
    num_params, flops = compute_model_complexity(
        model1, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    # Model-2 starts as an exact copy; separate weights may be loaded below.
    print('Copying model-1 to model-2')
    model2 = copy.deepcopy(model1)

    if cfg.model.load_weights1 and check_isfile(cfg.model.load_weights1):
        load_pretrained_weights(model1, cfg.model.load_weights1)

    if cfg.model.load_weights2 and check_isfile(cfg.model.load_weights2):
        load_pretrained_weights(model2, cfg.model.load_weights2)

    if cfg.use_gpu:
        model1 = nn.DataParallel(model1).cuda()
        model2 = nn.DataParallel(model2).cuda()

    # Each model gets its own optimizer/scheduler pair (same hyperparameters).
    optimizer1 = torchreid.optim.build_optimizer(model1,
                                                 **optimizer_kwargs(cfg))
    scheduler1 = torchreid.optim.build_lr_scheduler(optimizer1,
                                                    **lr_scheduler_kwargs(cfg))

    optimizer2 = torchreid.optim.build_optimizer(model2,
                                                 **optimizer_kwargs(cfg))
    scheduler2 = torchreid.optim.build_lr_scheduler(optimizer2,
                                                    **lr_scheduler_kwargs(cfg))

    # Only model-1's resume sets the start epoch; model-2's resume restores
    # its own state without touching cfg.train.start_epoch.
    if cfg.model.resume1 and check_isfile(cfg.model.resume1):
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume1,
                                                       model1,
                                                       optimizer=optimizer1,
                                                       scheduler=scheduler1)

    if cfg.model.resume2 and check_isfile(cfg.model.resume2):
        resume_from_checkpoint(cfg.model.resume2,
                               model2,
                               optimizer=optimizer2,
                               scheduler=scheduler2)

    print('Building DML-engine for image-reid')
    engine = ImageDMLEngine(datamanager,
                            model1,
                            optimizer1,
                            scheduler1,
                            model2,
                            optimizer2,
                            scheduler2,
                            margin=cfg.loss.triplet.margin,
                            weight_t=cfg.loss.triplet.weight_t,
                            weight_x=cfg.loss.triplet.weight_x,
                            weight_ml=cfg.loss.dml.weight_ml,
                            use_gpu=cfg.use_gpu,
                            label_smooth=cfg.loss.softmax.label_smooth,
                            deploy=cfg.model.deploy)
    engine.run(**engine_run_kwargs(cfg))
# Example #7
def main():
    """Train a (possibly NNCF-compressed) model, with optional aux models and LR finder.

    Order matters throughout: NNCF config changes precede seeding/logging,
    NNCF model changes precede optimizer construction, and checkpoint loading
    is skipped entirely when NNCF handles it internally.
    """
    parser = build_base_argparser()
    parser.add_argument('-e',
                        '--auxiliary-models-cfg',
                        type=str,
                        nargs='*',
                        default='',
                        help='path to extra config files')
    parser.add_argument('--split-models',
                        action='store_true',
                        help='whether to split models on own gpu')
    parser.add_argument('--enable_quantization',
                        action='store_true',
                        help='Enable NNCF quantization algorithm')
    parser.add_argument('--enable_pruning',
                        action='store_true',
                        help='Enable NNCF pruning algorithm')
    parser.add_argument(
        '--aux-config-opts',
        nargs='+',
        default=None,
        help='Modify aux config options using the command-line')
    args = parser.parse_args()

    # Config precedence: defaults < config file(s) < args < trailing CLI opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    # NNCF rewrites parts of the config, so apply it before seeding/logging.
    is_nncf_used = args.enable_quantization or args.enable_pruning
    if is_nncf_used:
        print(f'Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(cfg, args.enable_quantization,
                                          args.enable_pruning, args.opts)

    set_random_seed(cfg.train.seed, cfg.train.deterministic)

    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))

    if cfg.use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True

    num_aux_models = len(cfg.mutual_learning.aux_configs)
    datamanager = build_datamanager(cfg, args.classes)
    num_train_classes = datamanager.num_train_pids

    print('Building main model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        **model_kwargs(cfg, num_train_classes))
    # get_model_complexity_info reports MACs; printed as flops = macs * 2.
    macs, num_params = get_model_complexity_info(
        model, (3, cfg.data.height, cfg.data.width),
        as_strings=False,
        verbose=False,
        print_per_layer_stat=False)
    print('Main model complexity: params={:,} flops={:,}'.format(
        num_params, macs * 2))

    aux_lr = cfg.train.lr  # placeholder, needed for aux models, may be filled by nncf part below
    if is_nncf_used:
        print('Begin making NNCF changes in model')
        if cfg.use_gpu:
            model.cuda()

        # NNCF wraps the model and may alter cfg and the aux learning rate.
        compression_ctrl, model, cfg, aux_lr, nncf_metainfo = \
            make_nncf_changes_in_training(model, cfg,
                                          args.classes,
                                          args.opts)

        should_freeze_aux_models = True
        print(f'should_freeze_aux_models = {should_freeze_aux_models}')
        print('End making NNCF changes in model')
    else:
        compression_ctrl = None
        should_freeze_aux_models = False
        nncf_metainfo = None
    # creating optimizer and scheduler -- it should be done after NNCF part, since
    # NNCF could change some parameters
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))

    # When the LR finder will run (and no resume), defer scheduler creation.
    if cfg.lr_finder.enable and not cfg.model.resume:
        scheduler = None
    else:
        scheduler = torchreid.optim.build_lr_scheduler(
            optimizer=optimizer,
            num_iter=datamanager.num_iter,
            **lr_scheduler_kwargs(cfg))
    # Loading model (and optimizer and scheduler in case of resuming training).
    # Note that if NNCF is used, loading is done inside NNCF part, so loading here is not required.
    if cfg.model.resume and check_isfile(
            cfg.model.resume) and not is_nncf_used:
        device_ = 'cuda' if cfg.use_gpu else 'cpu'
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume,
                                                       model,
                                                       optimizer=optimizer,
                                                       scheduler=scheduler,
                                                       device=device_)
    elif cfg.model.load_weights and not is_nncf_used:
        load_pretrained_weights(model, cfg.model.load_weights)

    # Sanity-check that the dataset classes match the classification head.
    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=cfg.test.evaluate)

    model, extra_device_ids = put_main_model_on_the_device(
        model, cfg.use_gpu, args.gpu_num, num_aux_models, args.split_models)

    # LR finder may rebuild model/optimizer/scheduler and picks the aux LR.
    if cfg.lr_finder.enable and not cfg.test.evaluate and not cfg.model.resume:
        aux_lr, model, optimizer, scheduler = run_lr_finder(
            cfg,
            datamanager,
            model,
            optimizer,
            scheduler,
            args.classes,
            rebuild_model=True,
            gpu_num=args.gpu_num,
            split_models=args.split_models)

    log_dir = cfg.data.tb_log_dir if cfg.data.tb_log_dir else cfg.data.save_dir
    run_training(cfg,
                 datamanager,
                 model,
                 optimizer,
                 scheduler,
                 extra_device_ids,
                 aux_lr,
                 tb_writer=SummaryWriter(log_dir=log_dir),
                 should_freeze_aux_models=should_freeze_aux_models,
                 nncf_metainfo=nncf_metainfo,
                 compression_ctrl=compression_ctrl)
def main():
    """Evaluate a checkpoint: PyTorch weights (optionally NNCF-compressed) or an OpenVINO IR (.xml)."""
    parser = build_base_argparser()
    args = parser.parse_args()

    # Config precedence: defaults < config file(s) < args < trailing CLI opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    # A '.xml' weights path means an OpenVINO Inference Engine model.
    is_ie_model = cfg.model.load_weights.endswith('.xml')
    if not is_ie_model:
        # Detect from the checkpoint whether NNCF compression was applied.
        compression_hyperparams = get_compression_hyperparams(
            cfg.model.load_weights)
        is_nncf_used = compression_hyperparams[
            'enable_quantization'] or compression_hyperparams['enable_pruning']

        if is_nncf_used:
            print(f'Using NNCF -- making NNCF changes in config')
            cfg = make_nncf_changes_in_config(
                cfg, compression_hyperparams['enable_quantization'],
                compression_hyperparams['enable_pruning'], args.opts)
    else:
        is_nncf_used = False

    set_random_seed(cfg.train.seed)

    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'test.log' + time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    datamanager = torchreid.data.ImageDataManager(filter_classes=args.classes,
                                                  **imagedata_kwargs(cfg))
    # Class count is taken from the query set of the first target dataset.
    num_classes = len(
        datamanager.test_loader[cfg.data.targets[0]]['query'].dataset.classes)
    # EMA is a training-time feature; disable it for evaluation.
    cfg.train.ema.enable = False
    if not is_ie_model:
        model = torchreid.models.build_model(**model_kwargs(cfg, num_classes))
        load_pretrained_weights(model, cfg.model.load_weights)
        if is_nncf_used:
            print('Begin making NNCF changes in model')
            model = make_nncf_changes_in_eval(model, cfg)
            print('End making NNCF changes in model')
        if cfg.use_gpu:
            num_devices = min(torch.cuda.device_count(), args.gpu_num)
            main_device_ids = list(range(num_devices))
            model = DataParallel(model,
                                 device_ids=main_device_ids,
                                 output_device=0).cuda(main_device_ids[0])
    else:
        # Inference-engine path: run the IR model on CPU with batch size 1.
        from torchreid.utils.ie_tools import VectorCNN
        from openvino.inference_engine import IECore
        cfg.test.batch_size = 1
        model = VectorCNN(IECore(),
                          cfg.model.load_weights,
                          'CPU',
                          switch_rb=True,
                          **model_kwargs(cfg, num_classes))
        # NOTE(review): the last two transforms are stripped from the query
        # pipeline here — presumably tensor conversion/normalization that the
        # IE model performs itself; confirm against the transform definition.
        for _, dataloader in datamanager.test_loader.items():
            dataloader['query'].dataset.transform.transforms = \
                dataloader['query'].dataset.transform.transforms[:-2]

    # Sanity-check that the dataset classes match the classification head.
    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=True)

    # Evaluation only: no optimizer/scheduler is needed by the engine.
    engine = build_engine(cfg=cfg,
                          datamanager=datamanager,
                          model=model,
                          optimizer=None,
                          scheduler=None)
    engine.test(0,
                dist_metric=cfg.test.dist_metric,
                normalize_feature=cfg.test.normalize_feature,
                visrank=cfg.test.visrank,
                visrank_topk=cfg.test.visrank_topk,
                save_dir=cfg.data.save_dir,
                use_metric_cuhk03=cfg.cuhk03.use_metric_cuhk03,
                ranks=(1, 5, 10, 20),
                rerank=cfg.test.rerank)
# Example #9
def main():
    """Train/evaluate a pedestrian-attribute model with (optionally weighted) BCE loss."""
    global args

    set_random_seed(args.seed)
    use_gpu = torch.cuda.is_available() and not args.use_cpu
    # Mirror stdout into a log file under the save directory.
    log_name = 'test.log' if args.evaluate else 'train.log'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))

    # Dump all arguments (sorted by key) for reproducibility.
    print('** Arguments **')
    arg_keys = list(args.__dict__.keys())
    arg_keys.sort()
    for key in arg_keys:
        print('{}: {}'.format(key, args.__dict__[key]))
    print('\n')
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True
    else:
        warnings.warn(
            'Currently using CPU, however, GPU is highly recommended')

    dataset_vars = init_dataset(use_gpu)
    trainloader, valloader, testloader, num_attrs, attr_dict = dataset_vars

    if args.weighted_bce:
        # Weight each attribute by exp(-positive ratio) so rare attributes
        # contribute more to the loss.
        print('Use weighted binary cross entropy')
        print('Computing the weights ...')
        bce_weights = torch.zeros(num_attrs, dtype=torch.float)
        for _, attrs, _ in trainloader:
            bce_weights += attrs.sum(0)  # sum along the batch dim
        # NOTE(review): divides by num_batches * batch_size, which overcounts
        # if the last batch is smaller; and the (batch_size, num_attrs) expand
        # assumes every batch is full-sized — confirm drop_last is set.
        bce_weights /= len(trainloader) * args.batch_size
        print('Sample ratio for each attribute: {}'.format(bce_weights))
        bce_weights = torch.exp(-1 * bce_weights)
        print('BCE weights: {}'.format(bce_weights))
        bce_weights = bce_weights.expand(args.batch_size, num_attrs)
        criterion = nn.BCEWithLogitsLoss(weight=bce_weights)

    else:
        print('Use plain binary cross entropy')
        criterion = nn.BCEWithLogitsLoss()

    print('Building model: {}'.format(args.arch))
    model = models.build_model(args.arch,
                               num_attrs,
                               pretrained=not args.no_pretrained,
                               use_gpu=use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, args.height, args.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    # Optionally initialize from pretrained weights (skipped if file missing).
    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    if use_gpu:
        model = nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # Evaluation-only mode: test once and exit.
    if args.evaluate:
        test(model, testloader, attr_dict, use_gpu)
        return

    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(args))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer,
                                                   **lr_scheduler_kwargs(args))

    # Manual resume: restores weights, optimizer, epoch, and best metric.
    start_epoch = args.start_epoch
    best_result = -np.inf
    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        best_result = checkpoint['label_mA']
        print('Loaded checkpoint from "{}"'.format(args.resume))
        print('- start epoch: {}'.format(start_epoch))
        print('- label_mA: {}'.format(best_result))

    time_start = time.time()

    # Train/test loop; the best checkpoint is tracked by label-mA.
    for epoch in range(start_epoch, args.max_epoch):
        train(epoch, model, criterion, optimizer, scheduler, trainloader,
              use_gpu)
        test_outputs = test(model, testloader, attr_dict, use_gpu)
        label_mA = test_outputs[0]
        is_best = label_mA > best_result
        if is_best:
            best_result = label_mA

        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'label_mA': label_mA,
                'optimizer': optimizer.state_dict(),
            },
            args.save_dir,
            is_best=is_best)

    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
# Example #10
def main():
    """Run an Optuna hyperparameter search over the training objective."""
    # parse arguments
    parser = build_base_argparser()
    parser.add_argument('-e',
                        '--auxiliary-models-cfg',
                        type=str,
                        nargs='*',
                        default='',
                        help='path to extra config files')
    parser.add_argument('--split-models',
                        action='store_true',
                        help='whether to split models on own gpu')
    parser.add_argument(
        '--aux-config-opts',
        nargs='+',
        default=None,
        help='Modify aux config options using the command-line')
    parser.add_argument('--epochs',
                        default=10,
                        type=int,
                        help='amount of the epochs')

    args = parser.parse_args()
    # Config precedence: defaults < config file(s) < args < trailing CLI opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    set_random_seed(cfg.train.seed, cfg.train.deterministic)

    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'optuna.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))

    if cfg.use_gpu:
        # cudnn autotuner helps with fixed-size inputs.
        torch.backends.cudnn.benchmark = True

    # NOTE(review): seed=True is an odd value for TPESampler (an int is
    # expected); it is likely interpreted as seed=1 — confirm intent.
    sampler = TPESampler(n_startup_trials=5, seed=True)
    study = optuna.create_study(study_name='classification task',
                                direction="maximize",
                                sampler=sampler)
    # Bind cfg/args so Optuna only supplies the trial object.
    objective_partial = partial(objective, cfg, args)
    try:
        start_time = time.time()
        study.optimize(objective_partial,
                       n_trials=cfg.lr_finder.n_trials,
                       timeout=None)
        elapsed = round(time.time() - start_time)
        print(
            f"--- optimization is finished: {datetime.timedelta(seconds=elapsed)} ---"
        )

    # Report results both on completion and on manual interruption.
    except KeyboardInterrupt:
        finish_process(study)

    else:
        finish_process(study)
# Example #11
import torch
import torchreid
import os
from collections import OrderedDict
import time
import os.path as osp
import sys
import logging

from torchreid.utils import (Logger, check_isfile, set_random_seed,
                             collect_env_info, resume_from_checkpoint,
                             load_pretrained_weights, compute_model_complexity)

# Redirect stdout into a timestamped log file so all console output is captured.
log_name = 'test_log' + time.strftime('-%Y-%m-%d-%H-%M-%S')
sys.stdout = Logger(osp.join('base_', log_name))

# Checkpoint archive whose state dict will be loaded below.
checkpoint_path1 = 'models/market/model_name.tar'


def load_state_dict(checkpoint_path):
    if checkpoint_path and os.path.isfile(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        state_dict_key = 'state_dict'
        if state_dict_key and state_dict_key in checkpoint:
            new_state_dict = OrderedDict()
            for k, v in checkpoint[state_dict_key].items():
                name = k[7:] if k.startswith('module') else k
                new_state_dict[name] = v
            state_dict = new_state_dict
        else:
Example #12
0
def main():
    """Entry point: read a YAML config, build the data manager, model,
    optimizer and LR scheduler, then launch the re-id training/eval engine."""

    # Parse the single required CLI argument: the path to the YAML config.
    cli = argparse.ArgumentParser()
    cli.add_argument('-c',
                     '--config',
                     required=True,
                     help='path to configuration file')
    opts = cli.parse_args()
    with open(opts.config, "r") as ymlfile:
        cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)

    # Append the config file's stem as a sub-folder of save_dir, so "logs"
    # becomes e.g. "logs/exp01" automatically — no need to edit save_dir in
    # every single config file.
    exp_name = pathlib.Path(opts.config).stem
    cfg["save_dir"] = os.path.join(cfg["save_dir"], exp_name)

    # Reproducibility.
    set_random_seed(cfg["seed"])

    # GPU selection: optionally pin visible devices, then decide CPU vs GPU.
    if not cfg["use_avai_gpus"]:
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg["gpu_devices"]
    use_gpu = torch.cuda.is_available() and not cfg["use_cpu"]

    # Tee stdout into a timestamped log file under save_dir.
    log_name = 'test.log' if cfg["evaluate"] else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg["save_dir"], log_name))

    # Dump the effective configuration and environment for the record.
    print('==========\nArgs:{}\n=========='.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    if use_gpu:
        # Fixed input sizes benefit from cudnn autotuning.
        torch.backends.cudnn.benchmark = True
    else:
        warnings.warn(
            'Currently using CPU, however, GPU is highly recommended')

    # Data pipeline and backbone network.
    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg["arch"]))
    model = torchreid.models.build_model(
        name=cfg["arch"],
        num_classes=datamanager.num_train_pids,
        loss=cfg["loss"].lower(),
        pretrained=(not cfg["no_pretrained"]),
        use_gpu=use_gpu)

    # Report parameter count and FLOPs for a single forward pass.
    num_params, flops = compute_model_complexity(
        model, (1, 3, cfg["height"], cfg["width"]))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    # Optionally warm-start from externally trained weights.
    if cfg["load_weights"] and check_isfile(cfg["load_weights"]):
        load_pretrained_weights(model, cfg["load_weights"])

    # Wrap for multi-GPU execution when CUDA is available.
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # Optimization settings are derived entirely from the config.
    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, **lr_scheduler_kwargs(cfg))

    # Resume training from a checkpoint if one is configured and exists.
    if cfg["resume"] and check_isfile(cfg["resume"]):
        cfg["start_epoch"] = resume_from_checkpoint(cfg["resume"],
                                                    model,
                                                    optimizer=optimizer)

    print('Building {}-engine for {}-reid'.format(cfg["loss"],
                                                  cfg["app"]))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)

    engine.run(**engine_run_kwargs(cfg))