Example #1
def build_gallery(cfg, target_name):
    data_config = imagedata_kwargs(cfg)
    dataset = build_dataset(mode='gallery',
                            target_name=target_name,
                            **data_config)
    data_loader = build_data_loader(dataset, use_gpu=cfg.use_gpu)

    return data_loader, len(dataset)
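
As a usage note, the sketch below is not part of the original example: it assumes build_gallery and the config helpers used across these examples (get_default_config, imagedata_kwargs, build_dataset, build_data_loader) are importable from the project's own modules, and 'market1501' is only an illustrative target name.

# Hypothetical usage sketch; helper locations and the target name are assumptions.
cfg = get_default_config()
cfg.use_gpu = False  # keep the sketch CPU-only
gallery_loader, num_gallery_images = build_gallery(cfg, target_name='market1501')
print(f'Gallery loader ready with {num_gallery_images} images')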
Example #2
def prepare_data(cfg, mode='query'):
    data_config = imagedata_kwargs(cfg)
    dataset = build_dataset(mode=mode, **data_config)
    data_loader = build_data_loader(dataset, use_gpu=cfg.use_gpu)

    pids = dataset.num_train_pids
    keys = sorted(pids.keys())
    pids = [pids[key] for key in keys]

    return data_loader, pids
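
Judging from the code above, dataset.num_train_pids is treated here as a mapping from source name to identity count, and the counts are returned in sorted-key order. A minimal calling sketch, assuming cfg is built with the same config helpers as in the other examples:

# Hypothetical usage sketch; cfg is assumed to come from get_default_config() as elsewhere in these examples.
query_loader, pids_per_source = prepare_data(cfg, mode='query')
print(f'{len(pids_per_source)} sources, identity counts: {pids_per_source}')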
Example #3
    def train(self,
              dataset: DatasetEntity,
              output_model: ModelEntity,
              train_parameters: Optional[TrainParameters] = None):
        """ Trains a model on a dataset """

        train_model = deepcopy(self._model)

        if train_parameters is not None:
            update_progress_callback = train_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(
            update_progress_callback,
            num_epoch=self._cfg.train.max_epoch,
            num_train_steps=math.ceil(
                len(dataset.get_subset(Subset.TRAINING)) /
                self._cfg.train.batch_size),
            num_val_steps=0,
            num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [
            OTEClassificationDataset(train_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels),
            OTEClassificationDataset(val_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels)
        ]
        datamanager = torchreid.data.ImageDataManager(
            **imagedata_kwargs(self._cfg))

        num_aux_models = len(self._cfg.mutual_learning.aux_configs)

        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model,
                                       device_ids=main_device_ids,
                                       output_device=0).cuda(
                                           main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(
            train_model, **optimizer_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            scheduler = None
            _, train_model, optimizer, scheduler = run_lr_finder(
                self._cfg, datamanager, train_model, optimizer, scheduler,
                None, rebuild_model=False, gpu_num=self.num_devices,
                split_models=False)
        else:
            scheduler = torchreid.optim.build_lr_scheduler(
                optimizer,
                num_iter=datamanager.num_iter,
                **lr_scheduler_kwargs(self._cfg))

        _, final_acc = run_training(self._cfg,
                                    datamanager,
                                    train_model,
                                    optimizer,
                                    scheduler,
                                    extra_device_ids,
                                    self._cfg.train.lr,
                                    tb_writer=self.metrics_monitor,
                                    perf_monitor=time_monitor,
                                    stop_callback=self.stop_callback)

        training_metrics = self._generate_training_metrics_group()

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info("Training finished.")

        best_snap_path = os.path.join(self._scratch_space, 'best.pth')
        if os.path.isfile(best_snap_path):
            load_pretrained_weights(self._model, best_snap_path)

        for filename in os.listdir(self._scratch_space):
            match = re.match(r'best_(aux_model_[0-9]+\.pth)', filename)
            if match:
                aux_model_name = match.group(1)
                best_aux_snap_path = os.path.join(self._scratch_space,
                                                  filename)
                self._aux_model_snap_paths[aux_model_name] = best_aux_snap_path

        self.save_model(output_model)
        performance = Performance(score=ScoreMetric(value=final_acc,
                                                    name="accuracy"),
                                  dashboard_metrics=training_metrics)
        logger.info(f'FINAL MODEL PERFORMANCE {performance}')
        output_model.performance = performance
Example #4
def main():
    parser = build_base_argparser()
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    is_ie_model = cfg.model.load_weights.endswith('.xml')
    if not is_ie_model:
        compression_hyperparams = get_compression_hyperparams(
            cfg.model.load_weights)
        is_nncf_used = (compression_hyperparams['enable_quantization']
                        or compression_hyperparams['enable_pruning'])

        if is_nncf_used:
            print('Using NNCF -- making NNCF changes in config')
            cfg = make_nncf_changes_in_config(
                cfg, compression_hyperparams['enable_quantization'],
                compression_hyperparams['enable_pruning'], args.opts)
    else:
        is_nncf_used = False

    set_random_seed(cfg.train.seed)

    log_name = 'test.log' + time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    datamanager = torchreid.data.ImageDataManager(filter_classes=args.classes,
                                                  **imagedata_kwargs(cfg))
    num_classes = len(
        datamanager.test_loader[cfg.data.targets[0]]['query'].dataset.classes)
    cfg.train.ema.enable = False
    if not is_ie_model:
        model = torchreid.models.build_model(**model_kwargs(cfg, num_classes))
        load_pretrained_weights(model, cfg.model.load_weights)
        if is_nncf_used:
            print('Begin making NNCF changes in model')
            model = make_nncf_changes_in_eval(model, cfg)
            print('End making NNCF changes in model')
        if cfg.use_gpu:
            num_devices = min(torch.cuda.device_count(), args.gpu_num)
            main_device_ids = list(range(num_devices))
            model = DataParallel(model,
                                 device_ids=main_device_ids,
                                 output_device=0).cuda(main_device_ids[0])
    else:
        from torchreid.utils.ie_tools import VectorCNN
        from openvino.inference_engine import IECore
        cfg.test.batch_size = 1
        model = VectorCNN(IECore(),
                          cfg.model.load_weights,
                          'CPU',
                          switch_rb=True,
                          **model_kwargs(cfg, num_classes))
        for _, dataloader in datamanager.test_loader.items():
            dataloader['query'].dataset.transform.transforms = \
                dataloader['query'].dataset.transform.transforms[:-2]

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=True)

    engine = build_engine(cfg=cfg,
                          datamanager=datamanager,
                          model=model,
                          optimizer=None,
                          scheduler=None)
    engine.test(0,
                dist_metric=cfg.test.dist_metric,
                normalize_feature=cfg.test.normalize_feature,
                visrank=cfg.test.visrank,
                visrank_topk=cfg.test.visrank_topk,
                save_dir=cfg.data.save_dir,
                use_metric_cuhk03=cfg.cuhk03.use_metric_cuhk03,
                ranks=(1, 5, 10, 20),
                rerank=cfg.test.rerank)
Example #5
    def infer(
        self,
        dataset: DatasetEntity,
        inference_parameters: Optional[InferenceParameters] = None
    ) -> DatasetEntity:
        """
        Perform inference on the given dataset.

        :param dataset: Dataset entity to analyse
        :param inference_parameters: Additional parameters for inference.
            For example, when results are generated for evaluation purposes, Saliency maps can be turned off.
        :return: Dataset that also includes the classification results
        """
        if len(dataset) == 0:
            logger.warning("Empty dataset has been passed for the inference.")
            return dataset

        if inference_parameters is not None:
            update_progress_callback = inference_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback

        self._cfg.test.batch_size = max(
            1, self._hyperparams.learning_parameters.batch_size // 2)
        self._cfg.data.workers = max(
            min(self._cfg.data.workers,
                len(dataset) - 1), 0)

        time_monitor = InferenceProgressCallback(
            math.ceil(len(dataset) / self._cfg.test.batch_size),
            update_progress_callback)

        self._cfg.custom_datasets.roots = [
            OTEClassificationDataset(dataset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels),
            OTEClassificationDataset(dataset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels)
        ]
        datamanager = torchreid.data.ImageDataManager(
            **imagedata_kwargs(self._cfg))
        mix_precision_status = self._model.mix_precision
        self._model.mix_precision = False
        self._model.eval()
        self._model.to(self.device)
        targets = list(datamanager.test_loader.keys())
        # Default to dumping features when no inference parameters are given.
        dump_features = (not inference_parameters.is_evaluation
                         if inference_parameters is not None else True)
        inference_results, _ = score_extraction(
            datamanager.test_loader[targets[0]]['query'],
            self._model,
            self._cfg.use_gpu,
            perf_monitor=time_monitor,
            feature_dump_mode='all' if dump_features else 'vecs')
        self._model.mix_precision = mix_precision_status
        if dump_features:
            scores, features, feature_vecs = inference_results
            features = preprocess_features_for_actmap(features)
        else:
            scores, feature_vecs = inference_results

        if self._multilabel:
            scores = sigmoid_numpy(scores)

        for i in range(scores.shape[0]):
            dataset_item = dataset[i]

            if self._multilabel:
                item_labels = get_multilabel_predictions(scores[i],
                                                         self._labels,
                                                         activate=False)
                if not item_labels:
                    item_labels = [
                        ScoredLabel(self._empty_label, probability=1.)
                    ]
            else:
                scores[i] = softmax_numpy(scores[i])
                item_labels = get_multiclass_predictions(scores[i],
                                                         self._labels,
                                                         activate=False)
                if self._hierarchical:
                    item_labels.extend(
                        get_ancestors_by_prediction(
                            self._task_environment.label_schema,
                            item_labels[0]))

            dataset_item.append_labels(item_labels)
            active_score = active_score_from_probs(scores[i])
            active_score_media = FloatMetadata(
                name="active_score",
                value=active_score,
                float_type=FloatType.ACTIVE_SCORE)
            dataset_item.append_metadata_item(
                active_score_media, model=self._task_environment.model)
            feature_vec_media = TensorEntity(name="representation_vector",
                                             numpy=feature_vecs[i])
            dataset_item.append_metadata_item(
                feature_vec_media, model=self._task_environment.model)

            if dump_features:
                actmap = get_actmap(features[i],
                                    (dataset_item.width, dataset_item.height))
                saliency_media = ResultMediaEntity(
                    name="saliency_map",
                    type="Saliency map",
                    annotation_scene=dataset_item.annotation_scene,
                    numpy=actmap,
                    roi=dataset_item.roi)
                dataset_item.append_metadata_item(
                    saliency_media, model=self._task_environment.model)

        return dataset
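
A minimal calling sketch for infer, assuming task is an already constructed instance of this classification task and that InferenceParameters (with the is_evaluation flag used above) comes from the OTE SDK; the import path is an assumption:

# Hypothetical usage sketch; import path and the task instance are assumptions.
from ote_sdk.entities.inference_parameters import InferenceParameters  # assumed location

# is_evaluation=True disables feature dumping, so no saliency maps are attached (see dump_features above).
predicted_dataset = task.infer(dataset, InferenceParameters(is_evaluation=True))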
Example #6
    def optimize(
        self,
        optimization_type: OptimizationType,
        dataset: DatasetEntity,
        output_model: ModelEntity,
        optimization_parameters: Optional[OptimizationParameters],
    ):
        """ Optimize a model on a dataset """
        if optimization_type is not OptimizationType.NNCF:
            raise RuntimeError('NNCF is the only supported optimization')
        if self._compression_ctrl:
            raise RuntimeError('The model is already optimized. NNCF requires the original model for optimization.')
        if self._cfg.train.ema.enable:
            raise RuntimeError('EMA model could not be used together with NNCF compression')
        if self._cfg.lr_finder.enable:
            raise RuntimeError('LR finder could not be used together with NNCF compression')

        aux_pretrained_dicts = self._load_aux_models_data(self._task_environment.model)
        num_aux_models = len(self._cfg.mutual_learning.aux_configs)
        num_aux_pretrained_dicts = len(aux_pretrained_dicts)
        if num_aux_models != num_aux_pretrained_dicts:
            raise RuntimeError('The pretrained weights are not provided for all aux models.')

        if optimization_parameters is not None:
            update_progress_callback = optimization_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(update_progress_callback, num_epoch=self._cfg.train.max_epoch,
                                                num_train_steps=math.ceil(len(dataset.get_subset(Subset.TRAINING)) /
                                                                          self._cfg.train.batch_size),
                                                num_val_steps=0, num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [OTEClassificationDataset(train_subset, self._labels, self._multilabel,
                                                                    keep_empty_label=self._empty_label in self._labels),
                                           OTEClassificationDataset(val_subset, self._labels, self._multilabel,
                                                                    keep_empty_label=self._empty_label in self._labels)]
        datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(self._cfg))

        self._compression_ctrl, self._model, self._nncf_metainfo = \
            wrap_nncf_model(self._model, self._cfg, datamanager_for_init=datamanager)

        self._cfg.train.lr = calculate_lr_for_nncf_training(self._cfg, self._initial_lr, False)

        train_model = self._model
        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model, device_ids=main_device_ids,
                                       output_device=0).cuda(main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(train_model, **optimizer_kwargs(self._cfg))

        scheduler = torchreid.optim.build_lr_scheduler(optimizer, num_iter=datamanager.num_iter,
                                                       **lr_scheduler_kwargs(self._cfg))

        logger.info('Start training')
        run_training(self._cfg, datamanager, train_model, optimizer,
                     scheduler, extra_device_ids, self._cfg.train.lr,
                     should_freeze_aux_models=True,
                     aux_pretrained_dicts=aux_pretrained_dicts,
                     tb_writer=self.metrics_monitor,
                     perf_monitor=time_monitor,
                     stop_callback=self.stop_callback,
                     nncf_metainfo=self._nncf_metainfo,
                     compression_ctrl=self._compression_ctrl)

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info('Training completed')

        self.save_model(output_model)

        output_model.model_format = ModelFormat.BASE_FRAMEWORK
        output_model.optimization_type = self._optimization_type
        output_model.optimization_methods = self._optimization_methods
        output_model.precision = self._precision
Example #7
def build_datamanager(cfg):
    if cfg.data.type == 'image':
        return torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))
    else:
        return torchreid.data.VideoDataManager(**videodata_kwargs(cfg))
Example #8
def build_datamanager(cfg, classification_classes_filter=None):
    return torchreid.data.ImageDataManager(
        filter_classes=classification_classes_filter, **imagedata_kwargs(cfg))
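
A short usage sketch tying the two builders together, assuming the same config helpers as in the other examples; the class names passed to the filter are purely illustrative:

# Hypothetical usage sketch; config helpers and class names are assumptions.
cfg = get_default_config()
cfg.use_gpu = False
datamanager = build_datamanager(cfg, classification_classes_filter=['cat', 'dog'])
train_loader = datamanager.train_loader  # torchreid data managers expose train_loader and test_loader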