def _load_model(self,
                    model: ModelEntity,
                    device: torch.device,
                    pretrained_dict: Optional[Dict] = None):
        if model is not None:
            # If a model has been trained and saved for the task already, create empty model and load weights here
            if pretrained_dict is None:
                buffer = io.BytesIO(model.get_data("weights.pth"))
                model_data = torch.load(buffer,
                                        map_location=torch.device('cpu'))
            else:
                model_data = pretrained_dict

            model = self._create_model(self._cfg, from_scratch=True)

            try:
                load_pretrained_weights(model, pretrained_dict=model_data)
                logger.info("Loaded model weights from Task Environment")
            except Exception as ex:
                raise ValueError("Could not load the saved model. The model file structure is invalid.") \
                    from ex
        else:
            # If there is no trained model yet, create model with pretrained weights as defined in the model config
            # file.
            model = self._create_model(self._cfg, from_scratch=False)
            logger.info(
                "No trained model in project yet. Created new model with general-purpose pretrained weights."
            )
        return model.to(device)

    def _save_model(self,
                    output_model: ModelEntity,
                    state_dict: Optional[Dict] = None):
        """
        Save model
        """
        buffer = io.BytesIO()
        hyperparams = self._task_environment.get_hyper_parameters(
            OTEClassificationParameters)
        hyperparams_str = ids_to_strings(
            cfg_helper.convert(hyperparams, dict, enum_to_str=True))
        modelinfo = {
            'model': self._model.state_dict(),
            'config': hyperparams_str,
            'VERSION': 1
        }

        if state_dict is not None:
            modelinfo.update(state_dict)

        torch.save(modelinfo, buffer)
        output_model.set_data('weights.pth', buffer.getvalue())
        output_model.set_data(
            'label_schema.json',
            label_schema_to_bytes(self._task_environment.label_schema))

    @staticmethod
    def __load_weights(path: str, output_model: ModelEntity, key: str) -> None:
        """
        Load weights into output model

        Args:
            path (str): Path to weights
            output_model (ModelEntity): Model to which the weights are assigned
            key (str): Key of the output model into which the weights are assigned
        """
        with open(path, "rb") as file:
            output_model.set_data(key, file.read())
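
# A minimal sketch of the round trip implemented by _save_model/_load_model above:
# a state dict is serialized into the 'weights.pth' blob and read back. Any
# torch.nn.Module can stand in for the real task model here.
import io
import torch

toy_net = torch.nn.Linear(4, 2)  # stand-in for the real task model
buffer = io.BytesIO()
torch.save({'model': toy_net.state_dict(), 'VERSION': 1}, buffer)
blob = buffer.getvalue()  # bytes stored via output_model.set_data('weights.pth', ...)

model_data = torch.load(io.BytesIO(blob), map_location=torch.device('cpu'))
toy_net.load_state_dict(model_data['model'])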
    def test_model_entity_default_values(self):
        """
        <b>Description:</b>
        Check that ModelEntity correctly returns the default values

        <b>Expected results:</b>
        Test passes if ModelEntity correctly returns the default values

        <b>Steps</b>
        1. Check default values in the ModelEntity
        """

        model_entity = ModelEntity(train_dataset=self.dataset(),
                                   configuration=self.configuration())

        assert model_entity.id == ID()
        assert type(model_entity.configuration) == ModelConfiguration
        assert type(model_entity.creation_date) == datetime
        assert type(model_entity.train_dataset) == DatasetEntity
        assert model_entity.version == 1
        assert model_entity.model_status == ModelStatus.SUCCESS
        assert model_entity.model_format == ModelFormat.OPENVINO
        assert model_entity.precision == [ModelPrecision.FP32]
        assert model_entity.target_device == TargetDevice.CPU
        assert model_entity.optimization_type == ModelOptimizationType.NONE
        assert model_entity.performance == NullPerformance()

        for default_val_none in [
                "previous_trained_revision",
                "previous_revision",
                "target_device_type",
        ]:
            assert getattr(model_entity, default_val_none) is None

        for default_val_0_0 in ["training_duration", "model_size_reduction"]:
            assert getattr(model_entity, default_val_0_0) == 0.0

        for default_val_empty_list in ["tags", "optimization_methods"]:
            assert getattr(model_entity, default_val_empty_list) == []

        for default_val_empty_dict in [
                "model_adapters",
                "optimization_objectives",
                "performance_improvement",
        ]:
            assert getattr(model_entity, default_val_empty_dict) == {}

        for default_val_zero in ["latency", "fps_throughput"]:
            assert getattr(model_entity, default_val_zero) == 0

        assert model_entity.is_optimized() is False
Example #5
    def export(self, export_type: ExportType, output_model: ModelEntity):
        """Export model to OpenVINO IR

        Args:
            export_type (ExportType): Export type should be ExportType.OPENVINO
            output_model (ModelEntity): The model entity in which to write the OpenVINO IR data

        Raises:
            AssertionError: If export_type is not ExportType.OPENVINO
        """
        assert export_type == ExportType.OPENVINO

        # pylint: disable=no-member; need to refactor this
        height, width = self.config.model.input_size
        onnx_path = os.path.join(self.config.project.path, "onnx_model.onnx")
        torch.onnx.export(
            model=self.model.model,
            args=torch.zeros((1, 3, height, width)).to(self.model.device),
            f=onnx_path,
            opset_version=11,
        )
        optimize_command = ["mo", "--input_model", onnx_path, "--output_dir", self.config.project.path]
        subprocess.run(optimize_command, check=True)
        bin_file = glob(os.path.join(self.config.project.path, "*.bin"))[0]
        xml_file = glob(os.path.join(self.config.project.path, "*.xml"))[0]
        with open(bin_file, "rb") as file:
            output_model.set_data("openvino.bin", file.read())
        with open(xml_file, "rb") as file:
            output_model.set_data("openvino.xml", file.read())
        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))
        output_model.set_data(
            "threshold", bytes(struct.pack("f", self.model.threshold.item())))
Example #6
    def test_training_interface(self):
        """
        <b>Description:</b>
        Check ITrainingTask class object initialization

        <b>Input data:</b>
        ITrainingTask object

        <b>Expected results:</b>
        Test passes if ITrainingTask object methods raise NotImplementedError exception
        """
        i_training_task = ITrainingTask()
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        train_parameters = TrainParameters()

        with pytest.raises(NotImplementedError):
            i_training_task.save_model(model_entity)
        with pytest.raises(NotImplementedError):
            i_training_task.train(
                dataset=dataset,
                output_model=model_entity,
                train_parameters=train_parameters,
            )
        with pytest.raises(NotImplementedError):
            i_training_task.cancel_training()
Example #7
    def test_optimization_interface(self):
        """
        <b>Description:</b>
        Check IOptimizationTask class object initialization

        <b>Input data:</b>
        IOptimizationTask object

        <b>Expected results:</b>
        Test passes if IOptimizationTask object optimize method raises NotImplementedError exception
        """
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        optimization_parameters = OptimizationParameters()
        with pytest.raises(NotImplementedError):
            IOptimizationTask().optimize(
                optimization_type=OptimizationType.POT,
                dataset=dataset,
                output_model=model_entity,
                optimization_parameters=optimization_parameters,
            )
Example #8
    def test_evaluate_interface(self):
        """
        <b>Description:</b>
        Check IEvaluationTask class object initialization

        <b>Input data:</b>
        IEvaluationTask object

        <b>Expected results:</b>
        Test passes if IEvaluationTask object evaluate method raises NotImplementedError exception
        """
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        with pytest.raises(NotImplementedError):
            IEvaluationTask().evaluate(
                ResultSetEntity(
                    model=model_entity,
                    ground_truth_dataset=dataset,
                    prediction_dataset=dataset,
                ))
Example #9
def read_model(model_configuration, path, train_dataset):
    """
    Creates ModelEntity based on model_configuration and data stored at path.
    """

    if path.endswith(".bin") or path.endswith(".xml"):
        model_adapters = {
            "openvino.xml": ModelAdapter(read_binary(path[:-4] + ".xml")),
            "openvino.bin": ModelAdapter(read_binary(path[:-4] + ".bin")),
        }

        for confidence_threshold in ["confidence_threshold", "threshold"]:
            confidence_threshold_path = os.path.join(
                os.path.dirname(path), confidence_threshold
            )
            if os.path.exists(confidence_threshold_path):
                model_adapters[confidence_threshold] = ModelAdapter(
                    read_binary(confidence_threshold_path)
                )
    else:
        model_adapters = {"weights.pth": ModelAdapter(read_binary(path))}

    model = ModelEntity(
        configuration=model_configuration,
        model_adapters=model_adapters,
        train_dataset=train_dataset,
    )

    return model
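
# A usage sketch for read_model, mirroring the call in the main() shown later in
# this collection; the environment object and the weights path are illustrative.
environment.model = read_model(
    environment.get_model_configuration(),  # ModelConfiguration for the new entity
    "outputs/openvino.xml",                 # hypothetical path; the .bin/.xml pair is resolved inside
    None,                                   # no training dataset attached
)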
Example #10
def run_export(environment, dataset, task, action_name,
               expected_optimization_type):
    logger.debug(
        f'For action "{action_name}": Copy environment for evaluating the exported model'
    )

    environment_for_export = deepcopy(environment)

    logger.debug(f'For action "{action_name}": Create exported model')
    exported_model = ModelEntity(
        dataset,
        environment_for_export.get_model_configuration(),
        model_status=ModelStatus.NOT_READY,
    )
    logger.debug("Run export")
    task.export(ExportType.OPENVINO, exported_model)

    assert (
        exported_model.model_status == ModelStatus.SUCCESS
    ), f"In action '{action_name}': Export to OpenVINO was not successful"
    assert (exported_model.model_format == ModelFormat.OPENVINO
            ), f"In action '{action_name}': Wrong model format after export"
    assert (exported_model.optimization_type == expected_optimization_type
            ), f"In action '{action_name}': Wrong optimization type"

    logger.debug(
        f'For action "{action_name}": Set exported model into environment for export'
    )
    environment_for_export.model = exported_model
    return environment_for_export, exported_model
Example #11
    def deploy(self, output_model: ModelEntity) -> None:
        logger.info('Deploying the model')

        work_dir = os.path.dirname(demo.__file__)
        model_file = inspect.getfile(type(self.inferencer.model))
        parameters = {}
        parameters['type_of_model'] = 'ote_classification'
        parameters['converter_type'] = 'CLASSIFICATION'
        parameters['model_parameters'] = self.inferencer.configuration
        parameters['model_parameters']['labels'] = LabelSchemaMapper.forward(
            self.task_environment.label_schema)
        name_of_package = "demo_package"
        with tempfile.TemporaryDirectory() as tempdir:
            copyfile(os.path.join(work_dir, "setup.py"),
                     os.path.join(tempdir, "setup.py"))
            copyfile(os.path.join(work_dir, "requirements.txt"),
                     os.path.join(tempdir, "requirements.txt"))
            set_proper_git_commit_hash(
                os.path.join(tempdir, "requirements.txt"))
            copytree(os.path.join(work_dir, name_of_package),
                     os.path.join(tempdir, name_of_package))
            config_path = os.path.join(tempdir, name_of_package, "config.json")
            with open(config_path, "w", encoding='utf-8') as f:
                json.dump(parameters, f, ensure_ascii=False, indent=4)
            # generate model.py
            if (inspect.getmodule(self.inferencer.model) in [
                    module[1] for module in inspect.getmembers(
                        model_wrappers, inspect.ismodule)
            ]):
                copyfile(model_file,
                         os.path.join(tempdir, name_of_package, "model.py"))
            # create wheel package
            subprocess.run([
                sys.executable,
                os.path.join(tempdir, "setup.py"), 'bdist_wheel', '--dist-dir',
                tempdir, 'clean', '--all'
            ],
                           check=True)
            wheel_file_name = [
                f for f in os.listdir(tempdir) if f.endswith('.whl')
            ][0]

            with ZipFile(os.path.join(tempdir, "openvino.zip"), 'w') as zip_f:
                zip_f.writestr(os.path.join("model", "model.xml"),
                               self.model.get_data("openvino.xml"))
                zip_f.writestr(os.path.join("model", "model.bin"),
                               self.model.get_data("openvino.bin"))
                zip_f.write(os.path.join(tempdir, "requirements.txt"),
                            os.path.join("python", "requirements.txt"))
                zip_f.write(os.path.join(work_dir, "README.md"),
                            os.path.join("python", "README.md"))
                zip_f.write(os.path.join(work_dir, "LICENSE"),
                            os.path.join("python", "LICENSE"))
                zip_f.write(os.path.join(work_dir, "demo.py"),
                            os.path.join("python", "demo.py"))
                zip_f.write(os.path.join(tempdir, wheel_file_name),
                            os.path.join("python", wheel_file_name))
            with open(os.path.join(tempdir, "openvino.zip"), "rb") as file:
                output_model.exportable_code = file.read()
        logger.info('Deployment completed')
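
# deploy() leaves output_model.exportable_code holding the bytes of a zip archive.
# A minimal sketch of unpacking it on the consumer side (target directory is illustrative):
import io
from zipfile import ZipFile

with ZipFile(io.BytesIO(output_model.exportable_code)) as archive:
    archive.extractall("exported_demo")
# Archive layout written above: model/model.xml, model/model.bin, python/demo.py, ...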
Example #12
    def _run_ote_pot(self, data_collector, model_template, dataset,
                     environment_for_export):
        logger.debug("Creating environment and task for POT optimization")
        self.environment_for_pot = deepcopy(environment_for_export)
        self.openvino_task_pot = create_openvino_task(model_template,
                                                      environment_for_export)

        self.optimized_model_pot = ModelEntity(
            dataset,
            self.environment_for_pot.get_model_configuration(),
            model_status=ModelStatus.NOT_READY,
        )
        logger.info("Run POT optimization")
        self.openvino_task_pot.optimize(
            OptimizationType.POT,
            dataset.get_subset(self.pot_subset),
            self.optimized_model_pot,
            OptimizationParameters(),
        )
        assert (self.optimized_model_pot.model_status == ModelStatus.SUCCESS
                ), "POT optimization was not successful"
        assert (self.optimized_model_pot.model_format == ModelFormat.OPENVINO
                ), "Wrong model format after pot"
        assert (self.optimized_model_pot.optimization_type ==
                ModelOptimizationType.POT), "Wrong optimization type"
        logger.info("POT optimization is finished")
    def export(self, export_type: ExportType, output_model: ModelEntity):
        assert export_type == ExportType.OPENVINO
        output_model.model_format = ModelFormat.OPENVINO
        output_model.optimization_type = self._optimization_type

        with tempfile.TemporaryDirectory() as tempdir:
            optimized_model_dir = os.path.join(tempdir, "dor")
            logger.info(
                f'Optimized model will be temporarily saved to "{optimized_model_dir}"'
            )
            os.makedirs(optimized_model_dir, exist_ok=True)
            try:
                onnx_model_path = os.path.join(optimized_model_dir,
                                               'model.onnx')
                mix_precision_status = self._model.mix_precision
                self._model.mix_precision = False
                export_onnx(self._model.eval(), self._cfg, onnx_model_path)
                self._model.mix_precision = mix_precision_status
                export_ir(onnx_model_path,
                          self._cfg.data.norm_mean,
                          self._cfg.data.norm_std,
                          optimized_model_dir=optimized_model_dir)

                bin_file = [
                    f for f in os.listdir(optimized_model_dir)
                    if f.endswith('.bin')
                ][0]
                xml_file = [
                    f for f in os.listdir(optimized_model_dir)
                    if f.endswith('.xml')
                ][0]
                with open(os.path.join(optimized_model_dir, bin_file),
                          "rb") as f:
                    output_model.set_data("openvino.bin", f.read())
                with open(os.path.join(optimized_model_dir, xml_file),
                          "rb") as f:
                    output_model.set_data("openvino.xml", f.read())
                output_model.precision = self._precision
                output_model.optimization_methods = self._optimization_methods
            except Exception as ex:
                raise RuntimeError('Optimization was unsuccessful.') from ex

        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self._task_environment.label_schema))
        logger.info('Export completed.')
Example #14
def main():
    """
    Main function that is used for model exporting.
    """

    args = parse_args()

    # Load template.yaml file.
    template = find_and_parse_model_template(args.template)

    # Get class for Task.
    task_class = get_impl_class(template.entrypoints.base)

    # Get hyper parameters schema.
    hyper_parameters = create(template.hyper_parameters.data)
    assert hyper_parameters

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights),
                         "label_schema.json")),
        model_template=template,
    )

    model_adapters = {
        "weights.pth": ModelAdapter(read_binary(args.load_weights))
    }
    model = ModelEntity(
        configuration=environment.get_model_configuration(),
        model_adapters=model_adapters,
        train_dataset=None,
    )
    environment.model = model

    task = task_class(task_environment=environment)

    exported_model = ModelEntity(None,
                                 environment.get_model_configuration(),
                                 model_status=ModelStatus.NOT_READY)

    task.export(ExportType.OPENVINO, exported_model)

    os.makedirs(args.save_model_to, exist_ok=True)
    save_model_data(exported_model, args.save_model_to)
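Example #15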
    def test_model_entity__eq__(self):
        """
        <b>Description:</b>
        Check the ModelEntity __eq__ method

        <b>Expected results:</b>
        Test passes if a ModelEntity equals an equivalent ModelEntity and does not equal an object of another type
        """
        dataset = self.dataset()
        other_model_entity = ModelEntity(train_dataset=dataset,
                                         configuration=self.configuration())
        model_entity = ModelEntity(train_dataset=dataset,
                                   configuration=self.configuration())
        third_model_entity = ModelEntity(
            train_dataset=self.dataset(),
            configuration=self.other_configuration())
        assert model_entity.__eq__("") is False
        assert model_entity == other_model_entity
        assert model_entity != third_model_entity
Example #16
    def _run_ote_training(self, data_collector):
        logger.debug(f"self.template_path = {self.template_path}")

        print(
            f"train dataset: {len(self.dataset.get_subset(Subset.TRAINING))} items"
        )
        print(f"validation dataset: "
              f"{len(self.dataset.get_subset(Subset.VALIDATION))} items")

        logger.debug("Load model template")
        self.model_template = parse_model_template(self.template_path)

        logger.debug("Set hyperparameters")
        params = ote_sdk_configuration_helper_create(
            self.model_template.hyper_parameters.data)
        if self.num_training_iters != KEEP_CONFIG_FIELD_VALUE:
            params.learning_parameters.num_iters = int(self.num_training_iters)
            logger.debug(f"Set params.learning_parameters.num_iters="
                         f"{params.learning_parameters.num_iters}")
        else:
            logger.debug(f"Keep params.learning_parameters.num_iters="
                         f"{params.learning_parameters.num_iters}")

        if self.batch_size != KEEP_CONFIG_FIELD_VALUE:
            params.learning_parameters.batch_size = int(self.batch_size)
            logger.debug(f"Set params.learning_parameters.batch_size="
                         f"{params.learning_parameters.batch_size}")
        else:
            logger.debug(f"Keep params.learning_parameters.batch_size="
                         f"{params.learning_parameters.batch_size}")

        logger.debug("Setup environment")
        self.environment, self.task = self._create_environment_and_task(
            params, self.labels_schema, self.model_template)

        logger.debug("Train model")
        self.output_model = ModelEntity(
            self.dataset,
            self.environment.get_model_configuration(),
            model_status=ModelStatus.NOT_READY,
        )

        self.copy_hyperparams = deepcopy(self.task._hyperparams)

        self.task.train(self.dataset, self.output_model)
        assert (self.output_model.model_status == ModelStatus.SUCCESS
                ), "Training failed"

        score_name, score_value = self._get_training_performance_as_score_name_value()
        logger.info(f"performance={self.output_model.performance}")
        data_collector.log_final_metric("metric_name",
                                        self.name + "/" + score_name)
        data_collector.log_final_metric("metric_value", score_value)
Example #17
    def _run_ote_nncf(self, data_collector, model_template, dataset,
                      trained_model, environment):
        logger.debug(
            "Get predictions on the validation set for the exported model")
        self.environment_for_nncf = deepcopy(environment)

        logger.info("Create NNCF Task")
        nncf_task_class_impl_path = model_template.entrypoints.nncf
        if not nncf_task_class_impl_path:
            pytest.skip("NNCF is not enabled for this template")

        if not is_nncf_enabled():
            pytest.skip("NNCF is not installed")

        logger.info("Creating NNCF task and structures")
        self.nncf_model = ModelEntity(
            dataset,
            self.environment_for_nncf.get_model_configuration(),
            model_status=ModelStatus.NOT_READY,
        )
        self.nncf_model.set_data("weights.pth",
                                 trained_model.get_data("weights.pth"))

        self.environment_for_nncf.model = self.nncf_model

        nncf_task_cls = get_impl_class(nncf_task_class_impl_path)
        self.nncf_task = nncf_task_cls(
            task_environment=self.environment_for_nncf)

        logger.info("Run NNCF optimization")
        self.nncf_task.optimize(OptimizationType.NNCF, dataset,
                                self.nncf_model, None)
        assert (self.nncf_model.model_status == ModelStatus.SUCCESS
                ), "NNCF optimization was not successful"
        assert (self.nncf_model.optimization_type == ModelOptimizationType.NNCF
                ), "Wrong optimization type"
        assert (self.nncf_model.model_format == ModelFormat.BASE_FRAMEWORK
                ), "Wrong model format"
        logger.info("NNCF optimization is finished")
Example #18
def metadata_item_with_model() -> MetadataItemEntity:
    data = TensorEntity(
        name="appended_metadata_with_model",
        numpy=np.random.randint(low=0, high=255, size=(10, 15, 3)),
    )
    configuration = ModelConfiguration(
        configurable_parameters=ConfigurableParameters(
            header="Test Header"),
        label_schema=LabelSchemaEntity(),
    )
    model = ModelEntity(configuration=configuration,
                        train_dataset=DatasetEntity())
    metadata_item_with_model = MetadataItemEntity(data=data, model=model)
    return metadata_item_with_model
Example #19
def main():
    """
    Main function that is used for model deployment.
    """

    # Parses input arguments.
    args = parse_args()

    # Reads model template file.
    template = find_and_parse_model_template(args.template)

    # Get hyper parameters schema.
    hyper_parameters = template.hyper_parameters.data
    assert hyper_parameters

    # Get classes for Task, ConfigurableParameters and Dataset.
    if not args.load_weights.endswith(".bin") and not args.load_weights.endswith(
        ".xml"
    ):
        raise RuntimeError("Only OpenVINO-exported models are supported.")

    task_class = get_impl_class(template.entrypoints.openvino)

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=create(hyper_parameters),
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights), "label_schema.json")
        ),
        model_template=template,
    )
    environment.model = read_model(
        environment.get_model_configuration(), args.load_weights, None
    )

    task = task_class(task_environment=environment)

    deployed_model = ModelEntity(None, environment.get_model_configuration())

    os.makedirs(args.save_model_to, exist_ok=True)
    task.deploy(deployed_model)
    with open(os.path.join(args.save_model_to, "openvino.zip"), "wb") as write_file:
        write_file.write(deployed_model.exportable_code)
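Example #20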
def test_training_progress_tracking(default_task_setup):
    print('Task initialized, model training starts.')
    training_progress_curve = []
    task, task_environment, dataset = default_task_setup

    def progress_callback(progress: float, score: Optional[float] = None):
        training_progress_curve.append(progress)

    train_parameters = TrainParameters()
    train_parameters.update_progress = progress_callback
    output_model = ModelEntity(
        dataset,
        task_environment.get_model_configuration(),
    )
    task.train(dataset, output_model, train_parameters)

    assert len(training_progress_curve) > 0
    training_progress_curve = np.asarray(training_progress_curve)
    print(training_progress_curve)
    assert np.all(training_progress_curve[1:] >= training_progress_curve[:-1])
Example #21
    def test_export_interface(self):
        """
        <b>Description:</b>
        Check IExportTask class object initialization

        <b>Input data:</b>
        IExportTask object

        <b>Expected results:</b>
        Test passes if IExportTask object export method raises NotImplementedError exception
        """
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        with pytest.raises(NotImplementedError):
            IExportTask().export(export_type=ExportType.OPENVINO,
                                 output_model=model_entity)
Example #22
    def save_model(self, output_model: ModelEntity):
        """
        Save the model after training is completed.
        """
        config = self.get_config()
        model_info = {
            "model": self.model.state_dict(),
            "config": config,
            "VERSION": 1,
        }
        buffer = io.BytesIO()
        torch.save(model_info, buffer)
        output_model.set_data("weights.pth", buffer.getvalue())
        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))
        # store computed threshold
        output_model.set_data(
            "threshold", bytes(struct.pack("f", self.model.threshold.item())))

        f1_score = self.model.image_metrics.OptimalF1.compute().item()
        output_model.performance = Performance(
            score=ScoreMetric(name="F1 Score", value=f1_score))
        output_model.precision = [ModelPrecision.FP32]
Example #23
    def save_model(self, output_model: ModelEntity):
        for name, path in self._aux_model_snap_paths.items():
            with open(path, 'rb') as read_file:
                output_model.set_data(name, read_file.read())
        self._save_model(output_model)
Example #24
    def optimize(
        self,
        optimization_type: OptimizationType,
        dataset: DatasetEntity,
        output_model: ModelEntity,
        optimization_parameters: Optional[OptimizationParameters],
    ):
        """ Optimize a model on a dataset """
        if optimization_type is not OptimizationType.NNCF:
            raise RuntimeError('NNCF is the only supported optimization')
        if self._compression_ctrl:
            raise RuntimeError('The model is already optimized. NNCF requires the original model for optimization.')
        if self._cfg.train.ema.enable:
            raise RuntimeError('EMA model cannot be used together with NNCF compression')
        if self._cfg.lr_finder.enable:
            raise RuntimeError('LR finder cannot be used together with NNCF compression')

        aux_pretrained_dicts = self._load_aux_models_data(self._task_environment.model)
        num_aux_models = len(self._cfg.mutual_learning.aux_configs)
        num_aux_pretrained_dicts = len(aux_pretrained_dicts)
        if num_aux_models != num_aux_pretrained_dicts:
            raise RuntimeError('The pretrained weights are not provided for all aux models.')

        if optimization_parameters is not None:
            update_progress_callback = optimization_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(update_progress_callback, num_epoch=self._cfg.train.max_epoch,
                                                num_train_steps=math.ceil(len(dataset.get_subset(Subset.TRAINING)) /
                                                                          self._cfg.train.batch_size),
                                                num_val_steps=0, num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [OTEClassificationDataset(train_subset, self._labels, self._multilabel,
                                                                    keep_empty_label=self._empty_label in self._labels),
                                           OTEClassificationDataset(val_subset, self._labels, self._multilabel,
                                                                    keep_empty_label=self._empty_label in self._labels)]
        datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(self._cfg))

        self._compression_ctrl, self._model, self._nncf_metainfo = \
            wrap_nncf_model(self._model, self._cfg, datamanager_for_init=datamanager)

        self._cfg.train.lr = calculate_lr_for_nncf_training(self._cfg, self._initial_lr, False)

        train_model = self._model
        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model, device_ids=main_device_ids,
                                       output_device=0).cuda(main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(train_model, **optimizer_kwargs(self._cfg))

        scheduler = torchreid.optim.build_lr_scheduler(optimizer, num_iter=datamanager.num_iter,
                                                       **lr_scheduler_kwargs(self._cfg))

        logger.info('Start training')
        run_training(self._cfg, datamanager, train_model, optimizer,
                     scheduler, extra_device_ids, self._cfg.train.lr,
                     should_freeze_aux_models=True,
                     aux_pretrained_dicts=aux_pretrained_dicts,
                     tb_writer=self.metrics_monitor,
                     perf_monitor=time_monitor,
                     stop_callback=self.stop_callback,
                     nncf_metainfo=self._nncf_metainfo,
                     compression_ctrl=self._compression_ctrl)

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info('Training completed')

        self.save_model(output_model)

        output_model.model_format = ModelFormat.BASE_FRAMEWORK
        output_model.optimization_type = self._optimization_type
        output_model.optimization_methods = self._optimization_methods
        output_model.precision = self._precision
Example #25
class OTETestNNCFAction(BaseOTETestAction):
    _name = "nncf"
    _depends_stages_names = ["training"]

    def _run_ote_nncf(self, data_collector, model_template, dataset,
                      trained_model, environment):
        logger.debug(
            "Get predictions on the validation set for the exported model")
        self.environment_for_nncf = deepcopy(environment)

        logger.info("Create NNCF Task")
        nncf_task_class_impl_path = model_template.entrypoints.nncf
        if not nncf_task_class_impl_path:
            pytest.skip("NNCF is not enabled for this template")

        if not is_nncf_enabled():
            pytest.skip("NNCF is not installed")

        logger.info("Creating NNCF task and structures")
        self.nncf_model = ModelEntity(
            dataset,
            self.environment_for_nncf.get_model_configuration(),
            model_status=ModelStatus.NOT_READY,
        )
        self.nncf_model.set_data("weights.pth",
                                 trained_model.get_data("weights.pth"))

        self.environment_for_nncf.model = self.nncf_model

        nncf_task_cls = get_impl_class(nncf_task_class_impl_path)
        self.nncf_task = nncf_task_cls(
            task_environment=self.environment_for_nncf)

        logger.info("Run NNCF optimization")
        self.nncf_task.optimize(OptimizationType.NNCF, dataset,
                                self.nncf_model, None)
        assert (self.nncf_model.model_status == ModelStatus.SUCCESS
                ), "NNCF optimization was not successful"
        assert (self.nncf_model.optimization_type == ModelOptimizationType.NNCF
                ), "Wrong optimization type"
        assert (self.nncf_model.model_format == ModelFormat.BASE_FRAMEWORK
                ), "Wrong model format"
        logger.info("NNCF optimization is finished")

    def __call__(self, data_collector: DataCollector,
                 results_prev_stages: OrderedDict):
        self._check_result_prev_stages(results_prev_stages,
                                       self.depends_stages_names)

        kwargs = {
            "model_template":
            results_prev_stages["training"]["model_template"],
            "dataset": results_prev_stages["training"]["dataset"],
            "trained_model": results_prev_stages["training"]["output_model"],
            "environment": results_prev_stages["training"]["environment"],
        }

        self._run_ote_nncf(data_collector, **kwargs)
        results = {
            "nncf_task": self.nncf_task,
            "nncf_model": self.nncf_model,
            "nncf_environment": self.environment_for_nncf,
        }
        return results
Example #26
def main():
    """
    Main function that is used for model training.
    """

    # Dynamically create an argument parser based on override parameters.
    args, template, hyper_parameters = parse_args()
    # Get new values from user's input.
    updated_hyper_parameters = gen_params_dict_from_args(args)
    # Override the loaded parameters with the user's values.
    override_parameters(updated_hyper_parameters, hyper_parameters)

    hyper_parameters = create(hyper_parameters)

    # Get classes for Task, ConfigurableParameters and Dataset.
    task_class = get_impl_class(template.entrypoints.base)
    dataset_class = get_dataset_class(template.task_type)

    # Create instances of Task, ConfigurableParameters and Dataset.
    dataset = dataset_class(
        train_subset={
            "ann_file": args.train_ann_files,
            "data_root": args.train_data_roots,
        },
        val_subset={"ann_file": args.val_ann_files, "data_root": args.val_data_roots},
    )

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=generate_label_schema(dataset, template.task_type),
        model_template=template,
    )

    if args.load_weights:
        environment.model = ModelEntity(
            train_dataset=dataset,
            configuration=environment.get_model_configuration(),
            model_adapters={
                "weights.pth": ModelAdapter(read_binary(args.load_weights))
            },
        )

    if args.enable_hpo:
        run_hpo(args, environment, dataset, template.task_type)

    task = task_class(task_environment=environment)

    output_model = ModelEntity(
        dataset,
        environment.get_model_configuration(),
        model_status=ModelStatus.NOT_READY,
    )

    task.train(dataset, output_model, train_parameters=TrainParameters())

    save_model_data(output_model, args.save_model_to)

    validation_dataset = dataset.get_subset(Subset.VALIDATION)
    predicted_validation_dataset = task.infer(
        validation_dataset.with_empty_annotations(),
        InferenceParameters(is_evaluation=True),
    )

    resultset = ResultSetEntity(
        model=output_model,
        ground_truth_dataset=validation_dataset,
        prediction_dataset=predicted_validation_dataset,
    )
    task.evaluate(resultset)
    assert resultset.performance is not None
    print(resultset.performance)
Example #27
    def optimize(
        self,
        optimization_type: OptimizationType,
        dataset: DatasetEntity,
        output_model: ModelEntity,
        optimization_parameters: Optional[OptimizationParameters],
    ):
        """Optimize the model.

        Args:
            optimization_type (OptimizationType): Type of optimization [POT or NNCF]
            dataset (DatasetEntity): Input Dataset.
            output_model (ModelEntity): Output model.
            optimization_parameters (Optional[OptimizationParameters]): Optimization parameters.

        Raises:
            ValueError: When the optimization type is not POT, which is the only supported type at the moment.
        """
        if optimization_type is not OptimizationType.POT:
            raise ValueError(
                "POT is the only supported optimization type for OpenVINO models"
            )

        data_loader = OTEOpenVINOAnomalyDataloader(config=self.config,
                                                   dataset=dataset,
                                                   inferencer=self.inferencer)

        with tempfile.TemporaryDirectory() as tempdir:
            xml_path = os.path.join(tempdir, "model.xml")
            bin_path = os.path.join(tempdir, "model.bin")

            self.__save_weights(
                xml_path, self.task_environment.model.get_data("openvino.xml"))
            self.__save_weights(
                bin_path, self.task_environment.model.get_data("openvino.bin"))

            model_config = {
                "model_name": "openvino_model",
                "model": xml_path,
                "weights": bin_path,
            }
            model = load_model(model_config)

            if get_nodes_by_type(model, ["FakeQuantize"]):
                logger.warning("Model is already optimized by POT")
                return

        engine = IEEngine(config=ADDict({"device": "CPU"}),
                          data_loader=data_loader,
                          metric=None)
        pipeline = create_pipeline(
            algo_config=self._get_optimization_algorithms_configs(),
            engine=engine)
        compressed_model = pipeline.run(model)
        compress_model_weights(compressed_model)

        with tempfile.TemporaryDirectory() as tempdir:
            save_model(compressed_model, tempdir, model_name="model")
            self.__load_weights(path=os.path.join(tempdir, "model.xml"),
                                output_model=output_model,
                                key="openvino.xml")
            self.__load_weights(path=os.path.join(tempdir, "model.bin"),
                                output_model=output_model,
                                key="openvino.bin")

        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))
        output_model.set_data(
            "threshold", self.task_environment.model.get_data("threshold"))
        output_model.model_status = ModelStatus.SUCCESS
        output_model.model_format = ModelFormat.OPENVINO
        output_model.optimization_type = ModelOptimizationType.POT
        output_model.optimization_methods = [OptimizationMethod.QUANTIZATION]
        output_model.precision = [ModelPrecision.INT8]

        self.task_environment.model = output_model
        self.inferencer = self.load_inferencer()
Example #28
    def deploy(self, output_model: ModelEntity) -> None:
        """Exports the weights from ``output_model`` along with exportable code.

        Args:
            output_model (ModelEntity): Model with ``openvino.xml`` and ``.bin`` keys

        Raises:
            Exception: If ``task_environment.model`` is None
        """
        logger.info("Deploying Model")

        if self.task_environment.model is None:
            raise Exception(
                "task_environment.model is None. Cannot load weights.")

        work_dir = os.path.dirname(demo.__file__)
        parameters: Dict[str, Any] = {}
        parameters["type_of_model"] = "anomaly_classification"
        parameters["converter_type"] = "ANOMALY_CLASSIFICATION"
        parameters["model_parameters"] = self._get_openvino_configuration()
        name_of_package = "demo_package"

        with tempfile.TemporaryDirectory() as tempdir:
            copyfile(os.path.join(work_dir, "setup.py"),
                     os.path.join(tempdir, "setup.py"))
            copyfile(os.path.join(work_dir, "requirements.txt"),
                     os.path.join(tempdir, "requirements.txt"))
            copytree(os.path.join(work_dir, name_of_package),
                     os.path.join(tempdir, name_of_package))
            config_path = os.path.join(tempdir, name_of_package, "config.json")
            with open(config_path, "w", encoding="utf-8") as file:
                json.dump(parameters, file, ensure_ascii=False, indent=4)

            copyfile(inspect.getfile(AnomalyClassification),
                     os.path.join(tempdir, name_of_package, "model.py"))

            # create wheel package
            subprocess.run(
                [
                    sys.executable,
                    os.path.join(tempdir, "setup.py"),
                    "bdist_wheel",
                    "--dist-dir",
                    tempdir,
                    "clean",
                    "--all",
                ],
                check=True,
            )
            wheel_file_name = [
                f for f in os.listdir(tempdir) if f.endswith(".whl")
            ][0]

            with ZipFile(os.path.join(tempdir, "openvino.zip"), "w") as arch:
                arch.writestr(
                    os.path.join("model", "model.xml"),
                    self.task_environment.model.get_data("openvino.xml"))
                arch.writestr(
                    os.path.join("model", "model.bin"),
                    self.task_environment.model.get_data("openvino.bin"))
                arch.write(os.path.join(tempdir, "requirements.txt"),
                           os.path.join("python", "requirements.txt"))
                arch.write(os.path.join(work_dir, "README.md"),
                           os.path.join("python", "README.md"))
                arch.write(os.path.join(work_dir, "demo.py"),
                           os.path.join("python", "demo.py"))
                arch.write(os.path.join(tempdir, wheel_file_name),
                           os.path.join("python", wheel_file_name))
            with open(os.path.join(tempdir, "openvino.zip"),
                      "rb") as output_arch:
                output_model.exportable_code = output_arch.read()
        logger.info("Deploying completed")
Example #29
    def optimize(self, optimization_type: OptimizationType,
                 dataset: DatasetEntity, output_model: ModelEntity,
                 optimization_parameters: Optional[OptimizationParameters]):

        if optimization_type is not OptimizationType.POT:
            raise ValueError(
                "POT is the only supported optimization type for OpenVino models"
            )

        data_loader = OTEOpenVinoDataLoader(dataset, self.inferencer)

        with tempfile.TemporaryDirectory() as tempdir:
            xml_path = os.path.join(tempdir, "model.xml")
            bin_path = os.path.join(tempdir, "model.bin")
            with open(xml_path, "wb") as f:
                f.write(self.model.get_data("openvino.xml"))
            with open(bin_path, "wb") as f:
                f.write(self.model.get_data("openvino.bin"))

            model_config = ADDict({
                'model_name': 'openvino_model',
                'model': xml_path,
                'weights': bin_path
            })

            model = load_model(model_config)

            if get_nodes_by_type(model, ["FakeQuantize"]):
                raise RuntimeError("Model is already optimized by POT")

        engine_config = ADDict({'device': 'CPU'})

        stat_subset_size = self.hparams.pot_parameters.stat_subset_size
        preset = self.hparams.pot_parameters.preset.name.lower()

        algorithms = [{
            'name': 'DefaultQuantization',
            'params': {
                'target_device': 'ANY',
                'preset': preset,
                'stat_subset_size': min(stat_subset_size, len(data_loader)),
                'shuffle_data': True
            }
        }]

        engine = IEEngine(config=engine_config,
                          data_loader=data_loader,
                          metric=None)

        pipeline = create_pipeline(algorithms, engine)

        compressed_model = pipeline.run(model)

        compress_model_weights(compressed_model)

        with tempfile.TemporaryDirectory() as tempdir:
            save_model(compressed_model, tempdir, model_name="model")
            with open(os.path.join(tempdir, "model.xml"), "rb") as f:
                output_model.set_data("openvino.xml", f.read())
            with open(os.path.join(tempdir, "model.bin"), "rb") as f:
                output_model.set_data("openvino.bin", f.read())

        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))

        # set model attributes for quantized model
        output_model.model_format = ModelFormat.OPENVINO
        output_model.optimization_type = ModelOptimizationType.POT
        output_model.optimization_methods = [OptimizationMethod.QUANTIZATION]
        output_model.precision = [ModelPrecision.INT8]

        self.model = output_model
        self.inferencer = self.load_inferencer()
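
# OTEOpenVinoDataLoader is not shown in this collection; POT's IEEngine expects a
# data loader exposing __len__ and __getitem__. A minimal, illustrative sketch,
# assuming the pre-2022 compression.api package used with IEEngine above — the
# item convention and the pre_process helper are assumptions, not the real code:
from compression.api import DataLoader


class SketchDataLoader(DataLoader):
    def __init__(self, dataset, inferencer):
        super().__init__(config=None)
        self.dataset = dataset
        self.inferencer = inferencer

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        image = self.dataset[index].numpy            # raw item from the OTE dataset
        inputs = self.inferencer.pre_process(image)  # hypothetical pre-processing hook
        return (index, None), inputs                 # (annotation, data) pair for the engine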
Example #30
    def train(self,
              dataset: DatasetEntity,
              output_model: ModelEntity,
              train_parameters: Optional[TrainParameters] = None):
        """ Trains a model on a dataset """

        train_model = deepcopy(self._model)

        if train_parameters is not None:
            update_progress_callback = train_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(
            update_progress_callback,
            num_epoch=self._cfg.train.max_epoch,
            num_train_steps=math.ceil(
                len(dataset.get_subset(Subset.TRAINING)) /
                self._cfg.train.batch_size),
            num_val_steps=0,
            num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [
            OTEClassificationDataset(train_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels),
            OTEClassificationDataset(val_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels)
        ]
        datamanager = torchreid.data.ImageDataManager(
            **imagedata_kwargs(self._cfg))

        num_aux_models = len(self._cfg.mutual_learning.aux_configs)

        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model,
                                       device_ids=main_device_ids,
                                       output_device=0).cuda(
                                           main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(
            train_model, **optimizer_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            scheduler = None
        else:
            scheduler = torchreid.optim.build_lr_scheduler(
                optimizer,
                num_iter=datamanager.num_iter,
                **lr_scheduler_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            _, train_model, optimizer, scheduler = \
                run_lr_finder(self._cfg, datamanager, train_model, optimizer, scheduler, None,
                              rebuild_model=False, gpu_num=self.num_devices, split_models=False)

        _, final_acc = run_training(self._cfg,
                                    datamanager,
                                    train_model,
                                    optimizer,
                                    scheduler,
                                    extra_device_ids,
                                    self._cfg.train.lr,
                                    tb_writer=self.metrics_monitor,
                                    perf_monitor=time_monitor,
                                    stop_callback=self.stop_callback)

        training_metrics = self._generate_training_metrics_group()

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info("Training finished.")

        best_snap_path = os.path.join(self._scratch_space, 'best.pth')
        if os.path.isfile(best_snap_path):
            load_pretrained_weights(self._model, best_snap_path)

        for filename in os.listdir(self._scratch_space):
            match = re.match(r'best_(aux_model_[0-9]+\.pth)', filename)
            if match:
                aux_model_name = match.group(1)
                best_aux_snap_path = os.path.join(self._scratch_space,
                                                  filename)
                self._aux_model_snap_paths[aux_model_name] = best_aux_snap_path

        self.save_model(output_model)
        performance = Performance(score=ScoreMetric(value=final_acc,
                                                    name="accuracy"),
                                  dashboard_metrics=training_metrics)
        logger.info(f'FINAL MODEL PERFORMANCE {performance}')
        output_model.performance = performance