Example #1
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', '-c', type=str, required=True)
    parser.add_argument('--root', '-r', type=str, required=True)
    parser.add_argument('--save-dir', type=str, default='log')
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert osp.exists(args.config_file)
    assert osp.exists(args.root)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    data_loader, num_pids = prepare_data(cfg, mode='gallery')

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, num_pids))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = model.cuda()

    visualize_activation_map(model, data_loader, args.save_dir, cfg.data.width,
                             cfg.data.height, cfg.use_gpu)
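A minimal sketch of driving this script programmatically instead of from a shell, assuming main() is importable; the config path and data root below are hypothetical placeholders:

import sys

# Hypothetical paths; substitute a real config file and dataset root.
sys.argv = ['visualize_actmap.py',
            '--config-file', 'configs/model.yaml',
            '--root', 'data',
            '--save-dir', 'log/actmaps']
main()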
Example #2
def build_auxiliary_model(config_file,
                          num_classes,
                          use_gpu,
                          device_ids,
                          num_iter,
                          lr=None,
                          nncf_aux_config_changes=None,
                          aux_config_opts=None,
                          aux_pretrained_dict=None):
    aux_cfg = get_default_config()
    aux_cfg.use_gpu = use_gpu
    merge_from_files_with_base(aux_cfg, config_file)
    if nncf_aux_config_changes:
        print(
            f'applying changes to aux config from NNCF aux config {nncf_aux_config_changes}'
        )
        if not isinstance(nncf_aux_config_changes, CfgNode):
            nncf_aux_config_changes = CfgNode(nncf_aux_config_changes)
        aux_cfg.merge_from_other_cfg(nncf_aux_config_changes)
    if aux_config_opts:
        print(f'applying changes to aux config from command line arguments, '
              f'the changes are:\n{pformat(aux_config_opts)}')
        aux_cfg.merge_from_list(aux_config_opts)

    print(f'\nShow auxiliary configuration\n{aux_cfg}\n')

    if lr is not None:
        aux_cfg.train.lr = lr
        print(f"setting learning rate from main model: {lr}")
    model = torchreid.models.build_model(**model_kwargs(aux_cfg, num_classes))
    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(aux_cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer=optimizer, num_iter=num_iter, **lr_scheduler_kwargs(aux_cfg))

    if aux_cfg.model.resume and check_isfile(aux_cfg.model.resume):
        aux_cfg.train.start_epoch = resume_from_checkpoint(
            aux_cfg.model.resume,
            model,
            optimizer=optimizer,
            scheduler=scheduler)

    elif aux_pretrained_dict is not None:
        load_pretrained_weights(model, pretrained_dict=aux_pretrained_dict)

    elif aux_cfg.model.load_weights and check_isfile(
            aux_cfg.model.load_weights):
        load_pretrained_weights(model, aux_cfg.model.load_weights)

    if aux_cfg.use_gpu:
        assert device_ids is not None

        if len(device_ids) > 1:
            model = DataParallel(model, device_ids=device_ids,
                                 output_device=0).cuda(device_ids[0])
        else:
            model = model.cuda(device_ids[0])

    return model, optimizer, scheduler
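A hedged sketch of a call site for build_auxiliary_model, mirroring the mutual-learning loop in the later examples; the config path, class count, and iteration count are placeholders, not values from the source:

# Placeholder arguments; in the training scripts these come from the main
# config and the datamanager.
aux_model, aux_optimizer, aux_scheduler = build_auxiliary_model(
    config_file='configs/aux_model.yaml',
    num_classes=751,
    use_gpu=True,
    device_ids=[0],
    num_iter=100,
    lr=0.01)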
Example #3
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', help='path to config file')
    parser.add_argument('-s', '--sources', type=str, nargs='+', help='source datasets (delimited by space)')
    parser.add_argument('-t', '--targets', type=str, nargs='+', help='target datasets (delimited by space)')
    parser.add_argument('--root', type=str, default='', help='path to data root')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, datamanager.num_train_pids))
    num_params, flops = compute_model_complexity(model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        if cfg.model.pretrained and not cfg.test.evaluate:
            state_dict = torch.load(cfg.model.load_weights)
            model.load_pretrained_weights(state_dict)
        else:
            load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
        )

    print('Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
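The trailing positional opts feed cfg.merge_from_list. Assuming the config object is a yacs CfgNode (which the merge_from_file/merge_from_list API suggests), the override mechanics look like this self-contained sketch:

from yacs.config import CfgNode

# A toy config standing in for get_default_config().
cfg = CfgNode({'train': {'lr': 0.1, 'max_epoch': 60}})
# Pairs of full key paths and values, exactly what argparse collects into `opts`.
cfg.merge_from_list(['train.lr', '0.025', 'train.max_epoch', '120'])
assert cfg.train.lr == 0.025 and cfg.train.max_epoch == 120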
Example #4
def run_lr_finder(cfg,
                  datamanager,
                  model,
                  optimizer,
                  scheduler,
                  classes,
                  rebuild_model=True,
                  gpu_num=1,
                  split_models=False):
    if not rebuild_model:
        backup_model = deepcopy(model)

    engine = build_engine(cfg,
                          datamanager,
                          model,
                          optimizer,
                          scheduler,
                          initial_lr=cfg.train.lr)
    lr_finder = LrFinder(engine=engine, **lr_finder_run_kwargs(cfg))
    aux_lr = lr_finder.process()

    print(f"Estimated learning rate: {aux_lr}")
    if cfg.lr_finder.stop_after:
        print("Learning rate search finished. Terminating the training process")
        sys.exit(0)

    # Reload all parts of the training pipeline. We do not re-check the
    # classification parameters and do not fetch num_train_classes a second
    # time: that was done above, and the LR finder cannot change the
    # parameters of the datasets.
    cfg.train.lr = aux_lr
    cfg.lr_finder.enable = False
    set_random_seed(cfg.train.seed, cfg.train.deterministic)
    datamanager = build_datamanager(cfg, classes)
    num_train_classes = datamanager.num_train_pids

    if rebuild_model:
        backup_model = torchreid.models.build_model(
            **model_kwargs(cfg, num_train_classes))
        num_aux_models = len(cfg.mutual_learning.aux_configs)
        backup_model, _ = put_main_model_on_the_device(backup_model,
                                                       cfg.use_gpu, gpu_num,
                                                       num_aux_models,
                                                       split_models)

    optimizer = torchreid.optim.build_optimizer(backup_model,
                                                **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer=optimizer,
        num_iter=datamanager.num_iter,
        **lr_scheduler_kwargs(cfg))

    return cfg.train.lr, backup_model, optimizer, scheduler
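For reference, the idea behind an LR finder in a few lines of plain PyTorch; this is a self-contained toy sketch, not the LrFinder class used above:

import torch
import torch.nn.functional as F

# Sweep the learning rate upward exponentially and watch where the loss
# starts to diverge; a good LR lies shortly before that point.
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-5)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=1.5)
for step in range(30):
    x, y = torch.randn(32, 10), torch.randint(0, 2, (32,))
    loss = F.cross_entropy(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()
    print(step, scheduler.get_last_lr()[0], float(loss))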
Example #5
    def _create_model(self, config, from_scratch: bool = False):
        """
        Creates a model based on the configuration in config
        :param config: deep-object-reid configuration from which the model has to be built
        :param from_scratch: bool, if True does not load any weights
        :return model: Model in training mode
        """
        num_train_classes = len(self._labels)
        model = torchreid.models.build_model(
            **model_kwargs(config, num_train_classes))
        if self._cfg.model.load_weights and not from_scratch:
            load_pretrained_weights(model, self._cfg.model.load_weights)
        return model
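A self-contained sketch of the underlying torchreid call, assuming a stock torchreid install; the model name and class count are arbitrary examples:

import torchreid

# 'osnet_x1_0' is one of the names registered in torchreid.models;
# 751 is the Market-1501 identity count, any positive integer works.
model = torchreid.models.build_model(name='osnet_x1_0',
                                     num_classes=751,
                                     pretrained=False)
model.train()  # _create_model above likewise returns the model in training mode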
Example #6
    def __init__(self,
                 config_path='',
                 model_path='',
                 device='cuda',
                 verbose=True):
        # Build model
        cfg = get_default_config()
        merge_from_files_with_base(cfg, config_path)
        cfg.use_gpu = device.startswith('cuda')
        model = build_model(**model_kwargs(cfg, 1))
        model.eval()

        image_size = (cfg.data.height, cfg.data.width)
        flops, num_params = get_model_complexity_info(
            model, (3, image_size[0], image_size[1]),
            as_strings=False,
            verbose=False,
            print_per_layer_stat=False)

        if verbose:
            print('Model: {}'.format(cfg.model.name))
            print('- params: {:,}'.format(num_params))
            print('- flops: {:,}'.format(flops))

        if model_path and check_isfile(model_path):
            load_pretrained_weights(model, model_path)

        # Build transform functions
        transforms = []
        transforms += [T.Resize(image_size)]
        transforms += [T.ToTensor()]
        print(cfg.data.norm_mean, cfg.data.norm_std)
        transforms += [
            T.Normalize(mean=cfg.data.norm_mean, std=cfg.data.norm_std)
        ]
        preprocess = T.Compose(transforms)

        to_pil = T.ToPILImage()

        device = torch.device(device)
        model.to(device)

        # Class attributes
        self.model = model
        self.preprocess = preprocess
        self.to_pil = to_pil
        self.device = device
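A hypothetical companion method for the wrapper above, showing how the stored preprocess pipeline, device, and model would be used to embed one image; extract() is not part of the original snippet:

import torch
from PIL import Image

def extract(self, image_path):
    # Load, preprocess, and batch a single image, then run the model.
    image = Image.open(image_path).convert('RGB')
    blob = self.preprocess(image).unsqueeze(0).to(self.device)
    with torch.no_grad():
        return self.model(blob)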
Example #7
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', '-c', type=str, required=True)
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert osp.exists(args.config)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    merge_from_files_with_base(cfg, args.config)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    model = torchreid.models.build_model(**model_kwargs(cfg, [0, 0]))
    load_pretrained_weights(model, cfg.model.load_weights)

    conv_layers = collect_conv_layers(model)
    show_stat(conv_layers)
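collect_conv_layers is not shown in the snippet; a minimal equivalent that walks the module tree for convolutions might look like this (an assumption about its behavior, not the original implementation):

import torch.nn as nn

def collect_conv_layers(model):
    # Gather (name, module) pairs for every 2D convolution in the network.
    return [(name, module) for name, module in model.named_modules()
            if isinstance(module, nn.Conv2d)]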
Example #8
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', required=True,
                        help='Path to config file')
    parser.add_argument('--output-name', type=str, default='model',
                        help='Path to save ONNX model')
    parser.add_argument('--num-classes', type=int, nargs='+', default=None)
    parser.add_argument('--opset', type=int, default=11)
    parser.add_argument('--verbose', action='store_true',
                        help='Verbose mode for onnx.export')
    parser.add_argument('--disable-dyn-axes', default=False, action='store_true')
    parser.add_argument('--export_ir', action='store_true')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg)
    cfg.merge_from_list(args.opts)

    compression_hyperparams = get_compression_hyperparams(cfg.model.load_weights)
    is_nncf_used = compression_hyperparams['enable_quantization'] or compression_hyperparams['enable_pruning']
    if is_nncf_used:
        print('Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(cfg,
                                          compression_hyperparams['enable_quantization'],
                                          compression_hyperparams['enable_pruning'],
                                          args.opts)
    cfg.train.mix_precision = False
    cfg.freeze()
    num_classes = parse_num_classes(source_datasets=cfg.data.sources,
                                    classification=cfg.model.type == 'classification' or cfg.model.type == 'multilabel',
                                    num_classes=args.num_classes,
                                    snap_path=cfg.model.load_weights)
    model = build_model(**model_kwargs(cfg, num_classes))
    if cfg.model.load_weights:
        load_pretrained_weights(model, cfg.model.load_weights)
    else:
        warnings.warn("No weights are passed through 'load_weights' parameter! "
                      "The model will be converted with random or pretrained weights",
                      category=RuntimeWarning)
    if 'tresnet' in cfg.model.name:
        patch_InplaceAbn_forward()
    if is_nncf_used:
        print('Begin making NNCF changes in model')
        model = make_nncf_changes_in_eval(model, cfg)
        print('End making NNCF changes in model')
    onnx_file_path = export_onnx(model=model.eval(),
                                 cfg=cfg,
                                 output_file_path=args.output_name,
                                 disable_dyn_axes=args.disable_dyn_axes,
                                 verbose=args.verbose,
                                 opset=args.opset,
                                 extra_check=True)
    if args.export_ir:
        input_shape = [1, 3, cfg.data.height, cfg.data.width]
        export_ir(onnx_model_path=onnx_file_path,
                  norm_mean=cfg.data.norm_mean,
                  norm_std=cfg.data.norm_std,
                  input_shape=input_shape,
                  optimized_model_dir=os.path.dirname(os.path.abspath(onnx_file_path)),
                  data_type='FP32')
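A hypothetical smoke test for the exported file with onnxruntime; the file name and the 256x128 input resolution are placeholders standing in for args.output_name and cfg.data.height/width:

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
blob = np.random.rand(1, 3, 256, 128).astype(np.float32)
# Feed the network's first input and print the shapes of all outputs.
outputs = session.run(None, {session.get_inputs()[0].name: blob})
print([out.shape for out in outputs])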
Example #9
def main():
    parser = build_base_argparser()
    parser.add_argument('-e',
                        '--auxiliary-models-cfg',
                        type=str,
                        nargs='*',
                        default='',
                        help='path to extra config files')
    parser.add_argument('--split-models',
                        action='store_true',
                        help='whether to split models on own gpu')
    parser.add_argument('--enable_quantization',
                        action='store_true',
                        help='Enable NNCF quantization algorithm')
    parser.add_argument('--enable_pruning',
                        action='store_true',
                        help='Enable NNCF pruning algorithm')
    parser.add_argument(
        '--aux-config-opts',
        nargs='+',
        default=None,
        help='Modify aux config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    is_nncf_used = args.enable_quantization or args.enable_pruning
    if is_nncf_used:
        print('Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(cfg, args.enable_quantization,
                                          args.enable_pruning, args.opts)

    set_random_seed(cfg.train.seed, cfg.train.deterministic)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    num_aux_models = len(cfg.mutual_learning.aux_configs)
    datamanager = build_datamanager(cfg, args.classes)
    num_train_classes = datamanager.num_train_pids

    print('Building main model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        **model_kwargs(cfg, num_train_classes))
    macs, num_params = get_model_complexity_info(
        model, (3, cfg.data.height, cfg.data.width),
        as_strings=False,
        verbose=False,
        print_per_layer_stat=False)
    print('Main model complexity: params={:,} flops={:,}'.format(
        num_params, macs * 2))

    aux_lr = cfg.train.lr  # placeholder, needed for aux models, may be filled by nncf part below
    if is_nncf_used:
        print('Begin making NNCF changes in model')
        if cfg.use_gpu:
            model.cuda()

        compression_ctrl, model, cfg, aux_lr, nncf_metainfo = \
            make_nncf_changes_in_training(model, cfg,
                                          args.classes,
                                          args.opts)

        should_freeze_aux_models = True
        print(f'should_freeze_aux_models = {should_freeze_aux_models}')
        print('End making NNCF changes in model')
    else:
        compression_ctrl = None
        should_freeze_aux_models = False
        nncf_metainfo = None
    # creating optimizer and scheduler -- it should be done after NNCF part, since
    # NNCF could change some parameters
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))

    if cfg.lr_finder.enable and not cfg.model.resume:
        scheduler = None
    else:
        scheduler = torchreid.optim.build_lr_scheduler(
            optimizer=optimizer,
            num_iter=datamanager.num_iter,
            **lr_scheduler_kwargs(cfg))
    # Loading model (and optimizer and scheduler in case of resuming training).
    # Note that if NNCF is used, loading is done inside NNCF part, so loading here is not required.
    if cfg.model.resume and check_isfile(
            cfg.model.resume) and not is_nncf_used:
        device_ = 'cuda' if cfg.use_gpu else 'cpu'
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume,
                                                       model,
                                                       optimizer=optimizer,
                                                       scheduler=scheduler,
                                                       device=device_)
    elif cfg.model.load_weights and not is_nncf_used:
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=cfg.test.evaluate)

    model, extra_device_ids = put_main_model_on_the_device(
        model, cfg.use_gpu, args.gpu_num, num_aux_models, args.split_models)

    if cfg.lr_finder.enable and not cfg.test.evaluate and not cfg.model.resume:
        aux_lr, model, optimizer, scheduler = run_lr_finder(
            cfg,
            datamanager,
            model,
            optimizer,
            scheduler,
            args.classes,
            rebuild_model=True,
            gpu_num=args.gpu_num,
            split_models=args.split_models)

    log_dir = cfg.data.tb_log_dir if cfg.data.tb_log_dir else cfg.data.save_dir
    run_training(cfg,
                 datamanager,
                 model,
                 optimizer,
                 scheduler,
                 extra_device_ids,
                 aux_lr,
                 tb_writer=SummaryWriter(log_dir=log_dir),
                 should_freeze_aux_models=should_freeze_aux_models,
                 nncf_metainfo=nncf_metainfo,
                 compression_ctrl=compression_ctrl)
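set_random_seed is library code not shown here; a common seeding pattern it likely wraps (an assumption, not the verbatim implementation):

import random
import numpy as np
import torch

def set_random_seed(seed, deterministic=False):
    # Seed every RNG the training loop touches.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False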
Example #10
def main():
    parser = build_base_argparser()
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    is_ie_model = cfg.model.load_weights.endswith('.xml')
    if not is_ie_model:
        compression_hyperparams = get_compression_hyperparams(
            cfg.model.load_weights)
        is_nncf_used = compression_hyperparams[
            'enable_quantization'] or compression_hyperparams['enable_pruning']

        if is_nncf_used:
            print('Using NNCF -- making NNCF changes in config')
            cfg = make_nncf_changes_in_config(
                cfg, compression_hyperparams['enable_quantization'],
                compression_hyperparams['enable_pruning'], args.opts)
    else:
        is_nncf_used = False

    set_random_seed(cfg.train.seed)

    log_name = 'test.log' + time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    datamanager = torchreid.data.ImageDataManager(filter_classes=args.classes,
                                                  **imagedata_kwargs(cfg))
    num_classes = len(
        datamanager.test_loader[cfg.data.targets[0]]['query'].dataset.classes)
    cfg.train.ema.enable = False
    if not is_ie_model:
        model = torchreid.models.build_model(**model_kwargs(cfg, num_classes))
        load_pretrained_weights(model, cfg.model.load_weights)
        if is_nncf_used:
            print('Begin making NNCF changes in model')
            model = make_nncf_changes_in_eval(model, cfg)
            print('End making NNCF changes in model')
        if cfg.use_gpu:
            num_devices = min(torch.cuda.device_count(), args.gpu_num)
            main_device_ids = list(range(num_devices))
            model = DataParallel(model,
                                 device_ids=main_device_ids,
                                 output_device=0).cuda(main_device_ids[0])
    else:
        from torchreid.utils.ie_tools import VectorCNN
        from openvino.inference_engine import IECore
        cfg.test.batch_size = 1
        model = VectorCNN(IECore(),
                          cfg.model.load_weights,
                          'CPU',
                          switch_rb=True,
                          **model_kwargs(cfg, num_classes))
        for _, dataloader in datamanager.test_loader.items():
            dataloader['query'].dataset.transform.transforms = \
                dataloader['query'].dataset.transform.transforms[:-2]

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=True)

    engine = build_engine(cfg=cfg,
                          datamanager=datamanager,
                          model=model,
                          optimizer=None,
                          scheduler=None)
    engine.test(0,
                dist_metric=cfg.test.dist_metric,
                normalize_feature=cfg.test.normalize_feature,
                visrank=cfg.test.visrank,
                visrank_topk=cfg.test.visrank_topk,
                save_dir=cfg.data.save_dir,
                use_metric_cuhk03=cfg.cuhk03.use_metric_cuhk03,
                ranks=(1, 5, 10, 20),
                rerank=cfg.test.rerank)
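The ranks=(1, 5, 10, 20) argument requests CMC scores; a toy sketch of rank-k matching from a query-gallery distance matrix (NumPy arrays assumed, camera-id filtering omitted for brevity):

import numpy as np

def rank_k_accuracy(distmat, query_ids, gallery_ids, k=1):
    # A query scores a hit if a matching gallery id appears among its
    # k nearest gallery entries.
    order = np.argsort(distmat, axis=1)[:, :k]
    hits = [(gallery_ids[order[i]] == query_ids[i]).any()
            for i in range(len(query_ids))]
    return float(np.mean(hits))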
Example #11
def objective(cfg, args, trial):
    # Generate the trials.
    # g_ = trial.suggest_int("g_", 1, 7)
    # asl_pm = trial.suggest_float("asl_pm", 0, 0.5)
    # m = trial.suggest_float("m", 0.01, 0.7)
    # s = trial.suggest_int("s", 5, 60)
    lr = trial.suggest_float("lr", 0.001, 0.5)
    # t = trial.suggest_int("t", 1, 7)
    # cfg.loss.softmax.m = m
    # cfg.loss.softmax.s = s
    # cfg.loss.asl.p_m = asl_pm
    # cfg.loss.am_binary.amb_t = t
    cfg.train.lr = lr

    # generate the datamanager
    num_aux_models = len(cfg.mutual_learning.aux_configs)
    datamanager = build_datamanager(cfg, args.classes)

    # build the model.
    num_train_classes = datamanager.num_train_pids
    print('Building main model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        **model_kwargs(cfg, num_train_classes))
    aux_lr = cfg.train.lr  # placeholder needed for aux models; no NNCF changes are applied in this objective
    compression_ctrl = None
    should_freeze_aux_models = False
    nncf_metainfo = None
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer=optimizer,
        num_iter=datamanager.num_iter,
        **lr_scheduler_kwargs(cfg))
    # Loading model (and optimizer and scheduler in case of resuming training).
    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=cfg.test.evaluate)

    model, extra_device_ids = put_main_model_on_the_device(
        model, cfg.use_gpu, args.gpu_num, num_aux_models, args.split_models)

    num_aux_models = len(cfg.mutual_learning.aux_configs)
    num_train_classes = datamanager.num_train_pids

    if num_aux_models > 0:
        print(
            f'Enabled mutual learning between {len(cfg.mutual_learning.aux_configs) + 1} models.'
        )

        models, optimizers, schedulers = [model], [optimizer], [scheduler]
        for config_file, device_ids in zip(cfg.mutual_learning.aux_configs,
                                           extra_device_ids):
            aux_model, aux_optimizer, aux_scheduler = build_auxiliary_model(
                config_file,
                num_train_classes,
                cfg.use_gpu,
                device_ids,
                num_iter=datamanager.num_iter,
                lr=aux_lr,
                aux_config_opts=args.aux_config_opts)

            models.append(aux_model)
            optimizers.append(aux_optimizer)
            schedulers.append(aux_scheduler)
    else:
        models, optimizers, schedulers = model, optimizer, scheduler
    print(f'Building {cfg.loss.name}-engine')
    engine = build_engine(cfg,
                          datamanager,
                          models,
                          optimizers,
                          schedulers,
                          should_freeze_aux_models=should_freeze_aux_models,
                          nncf_metainfo=nncf_metainfo,
                          compression_ctrl=compression_ctrl,
                          initial_lr=aux_lr)
    test_acc = AverageMeter()
    obj = 0
    engine.start_epoch = 0
    engine.max_epoch = args.epochs
    print(f"\nnext trial with [lr: {lr}]")

    for engine.epoch in range(args.epochs):
        np.random.seed(cfg.train.seed + engine.epoch)
        avg_loss = engine.train(print_freq=20000,
                                fixbase_epoch=0,
                                open_layers=None,
                                lr_finder=False,
                                perf_monitor=None,
                                stop_callback=None)

        top1, _ = engine.test(
            engine.epoch,
            lr_finder=False,
        )

        test_acc.update(top1)
        smooth_top1 = test_acc.avg
        target_metric = smooth_top1 if engine.target_metric == 'test_acc' else avg_loss

        obj = top1
        if not engine.per_batch_annealing:
            engine.update_lr(output_avg_metric=target_metric)

        trial.report(obj, engine.epoch)

        # Handle pruning based on the intermediate value.
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()

        should_exit, _ = engine.exit_on_plateau_and_choose_best(
            top1, smooth_top1)
        should_exit = engine.early_stoping and should_exit
        if should_exit:
            break

    return obj
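A hypothetical driver for the objective above; cfg and args are assumed to be prepared as in the training entry points, and the trial budget is arbitrary:

import optuna

study = optuna.create_study(direction='maximize',
                            pruner=optuna.pruners.MedianPruner())
study.optimize(lambda trial: objective(cfg, args, trial), n_trials=20)
print('best lr:', study.best_params['lr'])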
Example #12
def main():

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='Path to config file')
    parser.add_argument('--output-name',
                        type=str,
                        default='model',
                        help='Path to save ONNX model')
    parser.add_argument('--opset', type=int, default=9)
    parser.add_argument('--verbose',
                        default=False,
                        action='store_true',
                        help='Verbose mode for onnx.export')
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    num_classes = parse_num_classes(cfg.data.sources)
    model = build_model(**model_kwargs(cfg, num_classes))
    load_pretrained_weights(model, cfg.model.load_weights)
    model.eval()

    transform = build_inference_transform(
        cfg.data.height,
        cfg.data.width,
        norm_mean=cfg.data.norm_mean,
        norm_std=cfg.data.norm_std,
    )

    input_img = random_image(cfg.data.height, cfg.data.width)
    input_blob = transform(input_img).unsqueeze(0)

    input_names = ['data']
    output_names = ['reid_embedding']
    dynamic_axes = {
        'data': {
            0: 'batch_size',
            1: 'channels',
            2: 'height',
            3: 'width'
        },
        'reid_embedding': {
            0: 'batch_size',
            1: 'dim'
        }
    }

    output_file_path = args.output_name
    if not args.output_name.endswith('.onnx'):
        output_file_path += '.onnx'

    register_op("group_norm", group_norm_symbolic, "", args.opset)
    with torch.no_grad():
        torch.onnx.export(
            model,
            input_blob,
            output_file_path,
            verbose=args.verbose,
            export_params=True,
            input_names=input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            opset_version=args.opset,
            operator_export_type=torch.onnx.OperatorExportTypes.ONNX)

    net_from_onnx = onnx.load(output_file_path)
    try:
        onnx.checker.check_model(net_from_onnx)
        print('ONNX check passed.')
    except onnx.onnx_cpp2py_export.checker.ValidationError as ex:
        print('ONNX check failed: {}.'.format(ex))
Example #13
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', '-c', type=str, required=True)
    parser.add_argument('--weights', '-w', type=str, required=True)
    parser.add_argument('--root', '-r', type=str, required=True)
    parser.add_argument('--out-dir', '-o', type=str, required=True)
    parser.add_argument('--matrix-size',
                        '-ms',
                        type=int,
                        required=False,
                        default=8)
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert exists(args.config_file)
    assert exists(args.weights)
    assert exists(args.root)

    create_dirs(args.out_dir)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    target_dataset = 'cityflow'
    data_query = build_query(cfg, target_dataset)
    data_gallery, gallery_size = build_gallery(cfg, target_dataset)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, [0]))

    print('Loading model: {}'.format(cfg.model.load_weights))
    cfg.model.load_weights = args.weights
    load_pretrained_weights(model, cfg.model.load_weights)
    model = model.cuda() if cfg.use_gpu else model

    print('Extracting query embeddings ...')
    images_query, embeddings_query, ids_query = run_model(
        model, data_query, cfg.use_gpu)

    print('Extracting gallery embeddings ...')
    images_gallery, embeddings_gallery, ids_gallery = run_model(
        model, data_gallery, cfg.use_gpu)

    print('Calculating distance matrices ...')
    distance_matrix_qg = calculate_distances(embeddings_query,
                                             embeddings_gallery)

    print('Finding matches ...')
    top_k = args.matrix_size**2 - 1
    matches = find_matches(distance_matrix_qg, top_k=top_k)

    print('Dumping visualizations ...')
    visualize_matches(matches, images_query, images_gallery, ids_query,
                      ids_gallery, args.matrix_size, args.out_dir)
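calculate_distances is not shown in the snippet; a minimal cosine-distance version over L2-normalized embeddings (an assumption about the metric, not the original implementation) could be:

import numpy as np

def calculate_distances(query, gallery):
    # Cosine distance between every query and every gallery embedding.
    q = query / np.linalg.norm(query, axis=1, keepdims=True)
    g = gallery / np.linalg.norm(gallery, axis=1, keepdims=True)
    return 1.0 - q @ g.T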
Example #14
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        required=True,
                        help='path to config file')
    parser.add_argument('--custom-roots',
                        type=str,
                        nargs='+',
                        help='paths to annotation of custom datasets (delimited by space)')
    parser.add_argument('--custom-types',
                        type=str,
                        nargs='+',
                        help='types of custom datasets (delimited by space)')
    parser.add_argument('--custom-names',
                        type=str,
                        nargs='+',
                        help='names of custom datasets (delimited by space)')
    parser.add_argument('--root',
                        type=str,
                        default='',
                        help='path to data root')
    parser.add_argument('--classes',
                        type=str,
                        nargs='+',
                        help='name of classes in classification dataset')
    parser.add_argument('--out')
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg, args.classes)
    num_train_classes = datamanager.num_train_pids

    print('Building main model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        **model_kwargs(cfg, num_train_classes))
    macs, num_params = get_model_complexity_info(
        model, (3, cfg.data.height, cfg.data.width),
        as_strings=False,
        verbose=False,
        print_per_layer_stat=False)
    print('Main model complexity: M params={:,} G flops={:,}'.format(
        num_params / 10**6, macs * 2 / 10**9))

    if args.out:
        out = list()
        out.append({
            'key': 'size',
            'display_name': 'Size',
            'value': num_params / 10**6,
            'unit': 'Mp'
        })
        out.append({
            'key': 'complexity',
            'display_name': 'Complexity',
            'value': 2 * macs / 10**9,
            'unit': 'GFLOPs'
        })
        print('dump to ' + args.out)
        with open(args.out, 'w') as write_file:
            json.dump(out, write_file, indent=4)
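A small sketch of reading the dump back; the file name is whatever was passed via --out (hypothetical here):

import json

with open('complexity.json') as f:
    for entry in json.load(f):
        print(f"{entry['display_name']}: {entry['value']:.2f} {entry['unit']}")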