Example #1
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', '-c', type=str, required=True)
    parser.add_argument('--root', '-r', type=str, required=True)
    parser.add_argument('--save-dir', type=str, default='log')
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert osp.exists(args.config_file)
    assert osp.exists(args.root)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    data_loader, num_pids = prepare_data(cfg, mode='gallery')

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, num_pids))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = model.cuda()

    visualize_activation_map(model, data_loader, args.save_dir, cfg.data.width,
                             cfg.data.height, cfg.use_gpu)
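A note on the opts plumbing: nargs=REMAINDER collects every trailing token, and cfg.merge_from_list consumes them as alternating key/value pairs. A minimal sketch of that mechanism, assuming a plain yacs CfgNode:

from yacs.config import CfgNode

cfg = CfgNode({'train': CfgNode({'lr': 0.1, 'max_epoch': 100})})
# Equivalent of appending "train.lr 0.01 train.max_epoch 50" on the command line:
cfg.merge_from_list(['train.lr', '0.01', 'train.max_epoch', '50'])
assert cfg.train.lr == 0.01 and cfg.train.max_epoch == 50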
Example #2
def build_auxiliary_model(config_file,
                          num_classes,
                          use_gpu,
                          device_ids,
                          num_iter,
                          lr=None,
                          nncf_aux_config_changes=None,
                          aux_config_opts=None,
                          aux_pretrained_dict=None):
    aux_cfg = get_default_config()
    aux_cfg.use_gpu = use_gpu
    merge_from_files_with_base(aux_cfg, config_file)
    if nncf_aux_config_changes:
        print(
            f'Applying aux config changes from the NNCF aux config: {nncf_aux_config_changes}'
        )
        if not isinstance(nncf_aux_config_changes, CfgNode):
            nncf_aux_config_changes = CfgNode(nncf_aux_config_changes)
        aux_cfg.merge_from_other_cfg(nncf_aux_config_changes)
    if aux_config_opts:
        print(f'Applying aux config changes from command line arguments, '
              f'the changes are:\n{pformat(aux_config_opts)}')
        aux_cfg.merge_from_list(aux_config_opts)

    print(f'\nShow auxiliary configuration\n{aux_cfg}\n')

    if lr is not None:
        aux_cfg.train.lr = lr
        print(f"setting learning rate from main model: {lr}")
    model = torchreid.models.build_model(**model_kwargs(aux_cfg, num_classes))
    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(aux_cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer=optimizer, num_iter=num_iter, **lr_scheduler_kwargs(aux_cfg))

    if aux_cfg.model.resume and check_isfile(aux_cfg.model.resume):
        aux_cfg.train.start_epoch = resume_from_checkpoint(
            aux_cfg.model.resume,
            model,
            optimizer=optimizer,
            scheduler=scheduler)

    elif aux_pretrained_dict is not None:
        load_pretrained_weights(model, pretrained_dict=aux_pretrained_dict)

    elif aux_cfg.model.load_weights and check_isfile(
            aux_cfg.model.load_weights):
        load_pretrained_weights(model, aux_cfg.model.load_weights)

    if aux_cfg.use_gpu:
        assert device_ids is not None

        if len(device_ids) > 1:
            model = DataParallel(model, device_ids=device_ids,
                                 output_device=0).cuda(device_ids[0])
        else:
            model = model.cuda(device_ids[0])

    return model, optimizer, scheduler
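A hypothetical call site for the helper above; the config path and numbers are placeholders, num_iter feeds the LR scheduler and lr overrides aux_cfg.train.lr:

import torch

aux_model, aux_optimizer, aux_scheduler = build_auxiliary_model(
    config_file='configs/aux_model.yaml',  # placeholder path
    num_classes=1000,
    use_gpu=torch.cuda.is_available(),
    device_ids=[0],
    num_iter=500,
    lr=0.01)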
Example #3
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', help='path to config file')
    parser.add_argument('-s', '--sources', type=str, nargs='+', help='source datasets (delimited by space)')
    parser.add_argument('-t', '--targets', type=str, nargs='+', help='target datasets (delimited by space)')
    parser.add_argument('--root', type=str, default='', help='path to data root')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, datamanager.num_train_pids))
    num_params, flops = compute_model_complexity(model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        if cfg.model.pretrained and not cfg.test.evaluate:
            state_dict = torch.load(cfg.model.load_weights)
            model.load_pretrained_weights(state_dict)
        else:
            load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
        )

    print('Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
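The sys.stdout = Logger(...) line above assumes a tee-style logger that mirrors console output into a log file. A minimal sketch of such a class (the real torchreid Logger may differ in details):

import os
import sys

class TeeLogger:
    """Write every message both to the console and to a log file."""

    def __init__(self, fpath):
        self.console = sys.stdout
        log_dir = os.path.dirname(fpath)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        self.file.write(msg)

    def flush(self):
        self.console.flush()
        self.file.flush()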
Example #4
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Adds the default int8 quantization NNCF metainfo '
                    'to deep-object-reid NNCF checkpoints that were trained '
                    'before NNCF metainfo was stored in checkpoints')
    parser.add_argument('--config-file',
                        type=str,
                        required=True,
                        help='path to config file')
    parser.add_argument('--checkpoint',
                        type=str,
                        required=True,
                        help='path to the src checkpoint file')
    parser.add_argument(
        '--dst-folder',
        type=str,
        required=True,
        help='path to the dst folder to store dst checkpoint file')
    args = parser.parse_args()

    cfg = get_default_config()
    merge_from_files_with_base(cfg, args.config_file)
    checkpoint = torch.load(args.checkpoint, map_location='cpu')
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            'Wrong format of checkpoint -- it is not the result of deep-object-reid training'
        )
    if checkpoint.get('nncf_metainfo'):
        raise RuntimeError(
            f'Checkpoint {args.checkpoint} already has nncf_metainfo')

    if not os.path.isdir(args.dst_folder):
        raise RuntimeError(f'The dst folder {args.dst_folder} is NOT present')

    # default nncf config
    h, w = cfg.data.height, cfg.data.width
    nncf_config_data = get_default_nncf_compression_config(h, w)

    nncf_metainfo = {
        'nncf_compression_enabled': True,
        'nncf_config': nncf_config_data
    }
    checkpoint['nncf_metainfo'] = nncf_metainfo
    res_path = save_checkpoint(checkpoint, args.dst_folder)
    print(f'Saved the patched checkpoint to {res_path}')
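A downstream loader can then detect the injected metainfo; a short sketch using the key names from the snippet above (the checkpoint path is a placeholder):

import torch

ckpt = torch.load('dst_folder/model.pth', map_location='cpu')  # placeholder path
meta = ckpt.get('nncf_metainfo')
if meta and meta.get('nncf_compression_enabled'):
    nncf_config = meta['nncf_config']  # the default int8 config injected above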
Example #5
    def __init__(self, task_environment: TaskEnvironment):
        logger.info("Loading OTEClassificationTask.")
        self._scratch_space = tempfile.mkdtemp(prefix="ote-cls-scratch-")
        logger.info(f"Scratch space created at {self._scratch_space}")

        self._task_environment = task_environment
        if len(task_environment.get_labels(False)) == 1:
            self._labels = task_environment.get_labels(include_empty=True)
        else:
            self._labels = task_environment.get_labels(include_empty=False)
        self._empty_label = get_empty_label(task_environment.label_schema)
        self._multilabel = len(task_environment.label_schema.get_groups(False)) > 1 and \
                len(task_environment.label_schema.get_groups(False)) == \
                len(task_environment.get_labels(include_empty=False))

        self._hierarchical = False
        if not self._multilabel and len(
                task_environment.label_schema.get_groups(False)) > 1:
            self._labels = get_leaf_labels(task_environment.label_schema)
            self._hierarchical = True

        template_file_path = task_environment.model_template.model_template_path

        self._base_dir = os.path.abspath(os.path.dirname(template_file_path))

        self._cfg = get_default_config()
        self._patch_config(self._base_dir)

        if self._multilabel:
            assert self._cfg.model.type == 'multilabel', task_environment.model_template.model_template_path + \
                ' model template does not support multilabel classification'
        else:
            assert self._cfg.model.type == 'classification', task_environment.model_template.model_template_path + \
                ' model template does not support multiclass classification'

        self.device = torch.device(
            "cuda:0") if torch.cuda.device_count() else torch.device("cpu")
        self._model = self._load_model(task_environment.model,
                                       device=self.device)

        self.stop_callback = StopCallback()
        self.metrics_monitor = DefaultMetricsMonitor()

        # Set default model attributes.
        self._optimization_methods = []
        self._precision = [ModelPrecision.FP32]
        self._optimization_type = ModelOptimizationType.MO
Example #6
    def __init__(self,
                 config_path='',
                 model_path='',
                 device='cuda',
                 verbose=True):
        # Build model
        cfg = get_default_config()
        merge_from_files_with_base(cfg, config_path)
        cfg.use_gpu = device.startswith('cuda')
        model = build_model(**model_kwargs(cfg, 1))
        model.eval()

        image_size = (cfg.data.height, cfg.data.width)
        flops, num_params = get_model_complexity_info(
            model, (3, image_size[0], image_size[1]),
            as_strings=False,
            verbose=False,
            print_per_layer_stat=False)

        if verbose:
            print('Model: {}'.format(cfg.model.name))
            print('- params: {:,}'.format(num_params))
            print('- flops: {:,}'.format(flops))

        if model_path and check_isfile(model_path):
            load_pretrained_weights(model, model_path)

        # Build transform functions
        transforms = []
        transforms += [T.Resize(image_size)]
        transforms += [T.ToTensor()]
        transforms += [
            T.Normalize(mean=cfg.data.norm_mean, std=cfg.data.norm_std)
        ]
        preprocess = T.Compose(transforms)

        to_pil = T.ToPILImage()

        device = torch.device(device)
        model.to(device)

        # Class attributes
        self.model = model
        self.preprocess = preprocess
        self.to_pil = to_pil
        self.device = device
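A hypothetical forward pass with this wrapper, PIL image in and embedding out; FeatureExtractor is a placeholder name for the class the __init__ above belongs to:

import torch
from PIL import Image

extractor = FeatureExtractor(config_path='configs/model.yaml',  # placeholder paths
                             model_path='weights/model.pth',
                             device='cpu')
img = Image.open('query.jpg').convert('RGB')
blob = extractor.preprocess(img).unsqueeze(0).to(extractor.device)
with torch.no_grad():
    embedding = extractor.model(blob)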
Example #7
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', '-c', type=str, required=True)
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert osp.exists(args.config)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    merge_from_files_with_base(cfg, args.config)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    model = torchreid.models.build_model(**model_kwargs(cfg, [0, 0]))
    load_pretrained_weights(model, cfg.model.load_weights)

    conv_layers = collect_conv_layers(model)
    show_stat(conv_layers)
Example #8
    def _patch_config(self, base_dir: str):
        self._cfg = get_default_config()
        if self._multilabel:
            config_file_path = os.path.join(base_dir,
                                            'main_model_multilabel.yaml')
        else:
            config_file_path = os.path.join(base_dir, 'main_model.yaml')
        merge_from_files_with_base(self._cfg, config_file_path)
        self._cfg.use_gpu = torch.cuda.device_count() > 0
        self.num_devices = 1 if self._cfg.use_gpu else 0

        self._cfg.custom_datasets.types = [
            'external_classification_wrapper',
            'external_classification_wrapper'
        ]
        self._cfg.custom_datasets.names = ['train', 'val']
        self._cfg.custom_datasets.roots = [''] * 2
        self._cfg.data.sources = ['train']
        self._cfg.data.targets = ['val']
        self._cfg.data.save_dir = self._scratch_space

        self._cfg.test.test_before_train = False
        self.num_classes = len(self._labels)

        for i, conf in enumerate(self._cfg.mutual_learning.aux_configs):
            if str(base_dir) not in conf:
                self._cfg.mutual_learning.aux_configs[i] = os.path.join(
                    base_dir, conf)

        self._cfg.train.lr = self._hyperparams.learning_parameters.learning_rate
        self._cfg.train.batch_size = self._hyperparams.learning_parameters.batch_size
        self._cfg.test.batch_size = max(
            1, self._hyperparams.learning_parameters.batch_size // 2)
        self._cfg.train.max_epoch = self._hyperparams.learning_parameters.max_num_epochs
        self._cfg.lr_finder.enable = self._hyperparams.learning_parameters.enable_lr_finder
        self._cfg.train.early_stopping = self._hyperparams.learning_parameters.enable_early_stopping
Example #9
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', required=True,
                        help='Path to config file')
    parser.add_argument('--output-name', type=str, default='model',
                        help='Path to save ONNX model')
    parser.add_argument('--num-classes', type=int, nargs='+', default=None)
    parser.add_argument('--opset', type=int, default=11)
    parser.add_argument('--verbose', action='store_true',
                        help='Verbose mode for onnx.export')
    parser.add_argument('--disable-dyn-axes', default=False, action='store_true')
    parser.add_argument('--export_ir', action='store_true')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg)
    cfg.merge_from_list(args.opts)

    compression_hyperparams = get_compression_hyperparams(cfg.model.load_weights)
    is_nncf_used = compression_hyperparams['enable_quantization'] or compression_hyperparams['enable_pruning']
    if is_nncf_used:
        print('Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(cfg,
                                          compression_hyperparams['enable_quantization'],
                                          compression_hyperparams['enable_pruning'],
                                          args.opts)
    cfg.train.mix_precision = False
    cfg.freeze()
    num_classes = parse_num_classes(source_datasets=cfg.data.sources,
                                    classification=cfg.model.type in ('classification', 'multilabel'),
                                    num_classes=args.num_classes,
                                    snap_path=cfg.model.load_weights)
    model = build_model(**model_kwargs(cfg, num_classes))
    if cfg.model.load_weights:
        load_pretrained_weights(model, cfg.model.load_weights)
    else:
        warnings.warn("No weights are passed through 'load_weights' parameter! "
                      "The model will be converted with random or pretrained weights",
                      category=RuntimeWarning)
    if 'tresnet' in cfg.model.name:
        patch_InplaceAbn_forward()
    if is_nncf_used:
        print('Begin making NNCF changes in model')
        model = make_nncf_changes_in_eval(model, cfg)
        print('End making NNCF changes in model')
    onnx_file_path = export_onnx(model=model.eval(),
                                 cfg=cfg,
                                 output_file_path=args.output_name,
                                 disable_dyn_axes=args.disable_dyn_axes,
                                 verbose=args.verbose,
                                 opset=args.opset,
                                 extra_check=True)
    if args.export_ir:
        input_shape = [1, 3, cfg.data.height, cfg.data.width]
        export_ir(onnx_model_path=onnx_file_path,
                  norm_mean=cfg.data.norm_mean,
                  norm_std=cfg.data.norm_std,
                  input_shape=input_shape,
                  optimized_model_dir=os.path.dirname(os.path.abspath(onnx_file_path)),
                  data_type='FP32')
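get_compression_hyperparams presumably inspects the checkpoint's NNCF metainfo (cf. Example #4) to decide which algorithms were enabled; a sketch under that assumption, treating the NNCF config as a standard compression section:

import torch

def sketch_compression_hyperparams(snap_path):
    # Assumption: mirrors the spirit of get_compression_hyperparams.
    ckpt = torch.load(snap_path, map_location='cpu')
    meta = ckpt.get('nncf_metainfo') or {}
    compression = meta.get('nncf_config', {}).get('compression', [])
    if isinstance(compression, dict):
        compression = [compression]
    algos = {c.get('algorithm') for c in compression}
    return {'enable_quantization': 'quantization' in algos,
            'enable_pruning': 'filter_pruning' in algos}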
Example #10
def main():
    parser = build_base_argparser()
    parser.add_argument('-e',
                        '--auxiliary-models-cfg',
                        type=str,
                        nargs='*',
                        default='',
                        help='path to extra config files')
    parser.add_argument('--split-models',
                        action='store_true',
                        help='whether to split models on own gpu')
    parser.add_argument('--enable_quantization',
                        action='store_true',
                        help='Enable NNCF quantization algorithm')
    parser.add_argument('--enable_pruning',
                        action='store_true',
                        help='Enable NNCF pruning algorithm')
    parser.add_argument(
        '--aux-config-opts',
        nargs='+',
        default=None,
        help='Modify aux config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    is_nncf_used = args.enable_quantization or args.enable_pruning
    if is_nncf_used:
        print('Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(cfg, args.enable_quantization,
                                          args.enable_pruning, args.opts)

    set_random_seed(cfg.train.seed, cfg.train.deterministic)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    num_aux_models = len(cfg.mutual_learning.aux_configs)
    datamanager = build_datamanager(cfg, args.classes)
    num_train_classes = datamanager.num_train_pids

    print('Building main model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        **model_kwargs(cfg, num_train_classes))
    macs, num_params = get_model_complexity_info(
        model, (3, cfg.data.height, cfg.data.width),
        as_strings=False,
        verbose=False,
        print_per_layer_stat=False)
    print('Main model complexity: params={:,} flops={:,}'.format(
        num_params, macs * 2))

    aux_lr = cfg.train.lr  # placeholder, needed for aux models, may be filled by nncf part below
    if is_nncf_used:
        print('Begin making NNCF changes in model')
        if cfg.use_gpu:
            model.cuda()

        compression_ctrl, model, cfg, aux_lr, nncf_metainfo = \
            make_nncf_changes_in_training(model, cfg,
                                          args.classes,
                                          args.opts)

        should_freeze_aux_models = True
        print(f'should_freeze_aux_models = {should_freeze_aux_models}')
        print('End making NNCF changes in model')
    else:
        compression_ctrl = None
        should_freeze_aux_models = False
        nncf_metainfo = None
    # creating optimizer and scheduler -- it should be done after NNCF part, since
    # NNCF could change some parameters
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))

    if cfg.lr_finder.enable and not cfg.model.resume:
        scheduler = None
    else:
        scheduler = torchreid.optim.build_lr_scheduler(
            optimizer=optimizer,
            num_iter=datamanager.num_iter,
            **lr_scheduler_kwargs(cfg))
    # Loading model (and optimizer and scheduler in case of resuming training).
    # Note that if NNCF is used, loading is done inside NNCF part, so loading here is not required.
    if cfg.model.resume and check_isfile(
            cfg.model.resume) and not is_nncf_used:
        device_ = 'cuda' if cfg.use_gpu else 'cpu'
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume,
                                                       model,
                                                       optimizer=optimizer,
                                                       scheduler=scheduler,
                                                       device=device_)
    elif cfg.model.load_weights and not is_nncf_used:
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=cfg.test.evaluate)

    model, extra_device_ids = put_main_model_on_the_device(
        model, cfg.use_gpu, args.gpu_num, num_aux_models, args.split_models)

    if cfg.lr_finder.enable and not cfg.test.evaluate and not cfg.model.resume:
        aux_lr, model, optimizer, scheduler = run_lr_finder(
            cfg,
            datamanager,
            model,
            optimizer,
            scheduler,
            args.classes,
            rebuild_model=True,
            gpu_num=args.gpu_num,
            split_models=args.split_models)

    log_dir = cfg.data.tb_log_dir if cfg.data.tb_log_dir else cfg.data.save_dir
    run_training(cfg,
                 datamanager,
                 model,
                 optimizer,
                 scheduler,
                 extra_device_ids,
                 aux_lr,
                 tb_writer=SummaryWriter(log_dir=log_dir),
                 should_freeze_aux_models=should_freeze_aux_models,
                 nncf_metainfo=nncf_metainfo,
                 compression_ctrl=compression_ctrl)
Example #11
def main():
    parser = build_base_argparser()
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    is_ie_model = cfg.model.load_weights.endswith('.xml')
    if not is_ie_model:
        compression_hyperparams = get_compression_hyperparams(
            cfg.model.load_weights)
        is_nncf_used = compression_hyperparams[
            'enable_quantization'] or compression_hyperparams['enable_pruning']

        if is_nncf_used:
            print('Using NNCF -- making NNCF changes in config')
            cfg = make_nncf_changes_in_config(
                cfg, compression_hyperparams['enable_quantization'],
                compression_hyperparams['enable_pruning'], args.opts)
    else:
        is_nncf_used = False

    set_random_seed(cfg.train.seed)

    log_name = 'test.log' + time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    datamanager = torchreid.data.ImageDataManager(filter_classes=args.classes,
                                                  **imagedata_kwargs(cfg))
    num_classes = len(
        datamanager.test_loader[cfg.data.targets[0]]['query'].dataset.classes)
    cfg.train.ema.enable = False
    if not is_ie_model:
        model = torchreid.models.build_model(**model_kwargs(cfg, num_classes))
        load_pretrained_weights(model, cfg.model.load_weights)
        if is_nncf_used:
            print('Begin making NNCF changes in model')
            model = make_nncf_changes_in_eval(model, cfg)
            print('End making NNCF changes in model')
        if cfg.use_gpu:
            num_devices = min(torch.cuda.device_count(), args.gpu_num)
            main_device_ids = list(range(num_devices))
            model = DataParallel(model,
                                 device_ids=main_device_ids,
                                 output_device=0).cuda(main_device_ids[0])
    else:
        from torchreid.utils.ie_tools import VectorCNN
        from openvino.inference_engine import IECore
        cfg.test.batch_size = 1
        model = VectorCNN(IECore(),
                          cfg.model.load_weights,
                          'CPU',
                          switch_rb=True,
                          **model_kwargs(cfg, num_classes))
        for _, dataloader in datamanager.test_loader.items():
            dataloader['query'].dataset.transform.transforms = \
                dataloader['query'].dataset.transform.transforms[:-2]

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=True)

    engine = build_engine(cfg=cfg,
                          datamanager=datamanager,
                          model=model,
                          optimizer=None,
                          scheduler=None)
    engine.test(0,
                dist_metric=cfg.test.dist_metric,
                normalize_feature=cfg.test.normalize_feature,
                visrank=cfg.test.visrank,
                visrank_topk=cfg.test.visrank_topk,
                save_dir=cfg.data.save_dir,
                use_metric_cuhk03=cfg.cuhk03.use_metric_cuhk03,
                ranks=(1, 5, 10, 20),
                rerank=cfg.test.rerank)
Example #12
def main():
    # parse arguments
    parser = build_base_argparser()
    parser.add_argument('-e',
                        '--auxiliary-models-cfg',
                        type=str,
                        nargs='*',
                        default='',
                        help='path to extra config files')
    parser.add_argument('--split-models',
                        action='store_true',
                        help='whether to split models on own gpu')
    parser.add_argument(
        '--aux-config-opts',
        nargs='+',
        default=None,
        help='Modify aux config options using the command-line')
    parser.add_argument('--epochs',
                        default=10,
                        type=int,
                        help='amount of the epochs')

    args = parser.parse_args()
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    set_random_seed(cfg.train.seed, cfg.train.deterministic)

    log_name = 'optuna.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    # use an explicit integer seed here; a bool seed would silently coerce to 1
    sampler = TPESampler(n_startup_trials=5, seed=cfg.train.seed)
    study = optuna.create_study(study_name='classification task',
                                direction="maximize",
                                sampler=sampler)
    objective_partial = partial(objective, cfg, args)
    try:
        start_time = time.time()
        study.optimize(objective_partial,
                       n_trials=cfg.lr_finder.n_trials,
                       timeout=None)
        elapsed = round(time.time() - start_time)
        print(
            f"--- optimization is finished: {datetime.timedelta(seconds=elapsed)} ---"
        )

    except KeyboardInterrupt:
        finish_process(study)

    else:
        finish_process(study)
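study.optimize invokes the partial with a single trial argument, so the objective above must accept (cfg, args, trial) and return the score to maximize; a minimal sketch of that contract (the body is illustrative only):

import optuna

def objective(cfg, args, trial: optuna.trial.Trial) -> float:
    lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
    # ... rebuild the model and engine with the sampled lr, run training ...
    return 0.0  # placeholder for the achieved validation accuracy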
Example #13
def main():

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='Path to config file')
    parser.add_argument('--output-name',
                        type=str,
                        default='model',
                        help='Path to save ONNX model')
    parser.add_argument('--opset', type=int, default=9)
    parser.add_argument('--verbose',
                        default=False,
                        action='store_true',
                        help='Verbose mode for onnx.export')
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    num_classes = parse_num_classes(cfg.data.sources)
    model = build_model(**model_kwargs(cfg, num_classes))
    load_pretrained_weights(model, cfg.model.load_weights)
    model.eval()

    transform = build_inference_transform(
        cfg.data.height,
        cfg.data.width,
        norm_mean=cfg.data.norm_mean,
        norm_std=cfg.data.norm_std,
    )

    input_img = random_image(cfg.data.height, cfg.data.width)
    input_blob = transform(input_img).unsqueeze(0)

    input_names = ['data']
    output_names = ['reid_embedding']
    dynamic_axes = {
        'data': {
            0: 'batch_size',
            1: 'channels',
            2: 'height',
            3: 'width'
        },
        'reid_embedding': {
            0: 'batch_size',
            1: 'dim'
        }
    }

    output_file_path = args.output_name
    if not args.output_name.endswith('.onnx'):
        output_file_path += '.onnx'

    register_op("group_norm", group_norm_symbolic, "", args.opset)
    with torch.no_grad():
        torch.onnx.export(
            model,
            input_blob,
            output_file_path,
            verbose=args.verbose,
            export_params=True,
            input_names=input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            opset_version=args.opset,
            operator_export_type=torch.onnx.OperatorExportTypes.ONNX)

    net_from_onnx = onnx.load(output_file_path)
    try:
        onnx.checker.check_model(net_from_onnx)
        print('ONNX check passed.')
    except onnx.onnx_cpp2py_export.checker.ValidationError as ex:
        print('ONNX check failed: {}.'.format(ex))
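Beyond the structural check, the export can be smoke-tested numerically against the PyTorch model; a sketch assuming onnxruntime is installed:

import numpy as np
import onnxruntime as ort
import torch

sess = ort.InferenceSession(output_file_path, providers=['CPUExecutionProvider'])
dummy = input_blob.cpu().numpy().astype(np.float32)
(onnx_embedding,) = sess.run(['reid_embedding'], {'data': dummy})
with torch.no_grad():
    torch_embedding = model(input_blob).cpu().numpy()
print('max abs diff:', np.abs(onnx_embedding - torch_embedding).max())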
Example #14
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', '-c', type=str, required=True)
    parser.add_argument('--weights', '-w', type=str, required=True)
    parser.add_argument('--root', '-r', type=str, required=True)
    parser.add_argument('--out-dir', '-o', type=str, required=True)
    parser.add_argument('--matrix-size',
                        '-ms',
                        type=int,
                        required=False,
                        default=8)
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert exists(args.config_file)
    assert exists(args.weights)
    assert exists(args.root)

    create_dirs(args.out_dir)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    target_dataset = 'cityflow'
    data_query = build_query(cfg, target_dataset)
    data_gallery, gallery_size = build_gallery(cfg, target_dataset)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, [0]))

    print('Loading model: {}'.format(cfg.model.load_weights))
    cfg.model.load_weights = args.weights
    load_pretrained_weights(model, cfg.model.load_weights)
    model = model.cuda() if cfg.use_gpu else model

    print('Extracting query embeddings ...')
    images_query, embeddings_query, ids_query = run_model(
        model, data_query, cfg.use_gpu)

    print('Extracting gallery embeddings ...')
    images_gallery, embeddings_gallery, ids_gallery = run_model(
        model, data_gallery, cfg.use_gpu)

    print('Calculating distance matrices ...')
    distance_matrix_qg = calculate_distances(embeddings_query,
                                             embeddings_gallery)

    print('Finding matches ...')
    top_k = args.matrix_size**2 - 1
    matches = find_matches(distance_matrix_qg, top_k=top_k)

    print('Dumping visualizations ...')
    visualize_matches(matches, images_query, images_gallery, ids_query,
                      ids_gallery, args.matrix_size, args.out_dir)
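calculate_distances is assumed to produce a query-by-gallery distance matrix; an equivalent sketch in torch, using cosine distance as one plausible metric (the real helper may use Euclidean distance instead):

import torch
import torch.nn.functional as F

def sketch_calculate_distances(query_emb, gallery_emb):
    # (N, D) and (M, D) embeddings -> (N, M) distances, smaller = more similar
    q = F.normalize(query_emb, dim=1)
    g = F.normalize(gallery_emb, dim=1)
    return 1.0 - q @ g.t()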
Example #15
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        required=True,
                        help='path to config file')
    parser.add_argument(
        '--custom-roots',
        type=str,
        nargs='+',
        help='paths to custom datasets or their annotations (delimited by space)')
    parser.add_argument('--custom-types',
                        type=str,
                        nargs='+',
                        help='types of custom datasets (delimited by space)')
    parser.add_argument('--custom-names',
                        type=str,
                        nargs='+',
                        help='names of custom datasets (delimited by space)')
    parser.add_argument('--root',
                        type=str,
                        default='',
                        help='path to data root')
    parser.add_argument('--classes',
                        type=str,
                        nargs='+',
                        help='name of classes in classification dataset')
    parser.add_argument('--out')
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg, args.classes)
    num_train_classes = datamanager.num_train_pids

    print('Building main model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        **model_kwargs(cfg, num_train_classes))
    macs, num_params = get_model_complexity_info(
        model, (3, cfg.data.height, cfg.data.width),
        as_strings=False,
        verbose=False,
        print_per_layer_stat=False)
    print('Main model complexity: params={:.2f}M flops={:.2f}G'.format(
        num_params / 10**6, macs * 2 / 10**9))

    if args.out:
        out = list()
        out.append({
            'key': 'size',
            'display_name': 'Size',
            'value': num_params / 10**6,
            'unit': 'Mp'
        })
        out.append({
            'key': 'complexity',
            'display_name': 'Complexity',
            'value': 2 * macs / 10**9,
            'unit': 'GFLOPs'
        })
        print('dump to ' + args.out)
        with open(args.out, 'w') as write_file:
            json.dump(out, write_file, indent=4)
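The dumped report is a plain JSON list of key/value entries; reading it back, given the structure shown above:

import json

with open('report.json') as f:  # placeholder for the path passed via --out
    report = json.load(f)
size = next(e['value'] for e in report if e['key'] == 'size')          # Mp
gflops = next(e['value'] for e in report if e['key'] == 'complexity')  # GFLOPs
print('{:.2f} Mp, {:.2f} GFLOPs'.format(size, gflops))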