Example #1

    def _save_model(self,
                    output_model: ModelEntity,
                    state_dict: Optional[Dict] = None):
        """
        Save model
        """
        buffer = io.BytesIO()
        hyperparams = self._task_environment.get_hyper_parameters(
            OTEClassificationParameters)
        hyperparams_str = ids_to_strings(
            cfg_helper.convert(hyperparams, dict, enum_to_str=True))
        modelinfo = {
            'model': self._model.state_dict(),
            'config': hyperparams_str,
            'VERSION': 1
        }

        if state_dict is not None:
            modelinfo.update(state_dict)

        torch.save(modelinfo, buffer)
        output_model.set_data('weights.pth', buffer.getvalue())
        output_model.set_data(
            'label_schema.json',
            label_schema_to_bytes(self._task_environment.label_schema))
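
A minimal round-trip sketch (not part of the original snippet): the bytes written
to "weights.pth" above can be restored with torch.load; `model` is an
illustrative name for the module being restored:

    import io
    import torch

    weights_bytes = output_model.get_data("weights.pth")
    modelinfo = torch.load(io.BytesIO(weights_bytes), map_location="cpu")
    assert modelinfo["VERSION"] == 1
    model.load_state_dict(modelinfo["model"])  # restore the saved state_dict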

Example #2

    @staticmethod
    def __load_weights(path: str, output_model: ModelEntity, key: str) -> None:
        """
        Load weights into output model

        Args:
            path (str): Path to weights
            output_model (ModelEntity): Model to which the weights are assigned
            key (str): Key of the output model into which the weights are assigned
        """
        with open(path, "rb") as file:
            output_model.set_data(key, file.read())
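
A hypothetical usage sketch; the path is illustrative (the same call appears in
Example #8 below):

    self.__load_weights(path="/tmp/model.xml", output_model=output_model, key="openvino.xml")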
Example #3

    def export(self, export_type: ExportType, output_model: ModelEntity):
        """Export model to OpenVINO IR

        Args:
            export_type (ExportType): Export type should be ExportType.OPENVINO
            output_model (ModelEntity): The model entity in which to write the OpenVINO IR data

        Raises:
            Exception: If export_type is not ExportType.OPENVINO
        """
        assert export_type == ExportType.OPENVINO

        # pylint: disable=no-member; need to refactor this
        height, width = self.config.model.input_size
        onnx_path = os.path.join(self.config.project.path, "onnx_model.onnx")
        torch.onnx.export(
            model=self.model.model,
            args=torch.zeros((1, 3, height, width)).to(self.model.device),
            f=onnx_path,
            opset_version=11,
        )
        # Run OpenVINO Model Optimizer; the list form avoids shell quoting issues
        # and check=True surfaces conversion failures instead of ignoring them.
        optimize_command = ["mo", "--input_model", onnx_path, "--output_dir", self.config.project.path]
        subprocess.run(optimize_command, check=True)
        bin_file = glob(os.path.join(self.config.project.path, "*.bin"))[0]
        xml_file = glob(os.path.join(self.config.project.path, "*.xml"))[0]
        with open(bin_file, "rb") as file:
            output_model.set_data("openvino.bin", file.read())
        with open(xml_file, "rb") as file:
            output_model.set_data("openvino.xml", file.read())
        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))
        output_model.set_data(
            "threshold", bytes(struct.pack("f", self.model.threshold.item())))

Example #4

    def export(self, export_type: ExportType, output_model: ModelEntity):
        """Export the model to OpenVINO IR via an intermediate ONNX model."""
        assert export_type == ExportType.OPENVINO
        output_model.model_format = ModelFormat.OPENVINO
        output_model.optimization_type = self._optimization_type

        with tempfile.TemporaryDirectory() as tempdir:
            optimized_model_dir = os.path.join(tempdir, "dor")
            logger.info(
                f'Optimized model will be temporarily saved to "{optimized_model_dir}"'
            )
            os.makedirs(optimized_model_dir, exist_ok=True)
            try:
                onnx_model_path = os.path.join(optimized_model_dir,
                                               'model.onnx')
                mix_precision_status = self._model.mix_precision
                self._model.mix_precision = False
                export_onnx(self._model.eval(), self._cfg, onnx_model_path)
                self._model.mix_precision = mix_precision_status
                export_ir(onnx_model_path,
                          self._cfg.data.norm_mean,
                          self._cfg.data.norm_std,
                          optimized_model_dir=optimized_model_dir)

                bin_file = [
                    f for f in os.listdir(optimized_model_dir)
                    if f.endswith('.bin')
                ][0]
                xml_file = [
                    f for f in os.listdir(optimized_model_dir)
                    if f.endswith('.xml')
                ][0]
                with open(os.path.join(optimized_model_dir, bin_file),
                          "rb") as f:
                    output_model.set_data("openvino.bin", f.read())
                with open(os.path.join(optimized_model_dir, xml_file),
                          "rb") as f:
                    output_model.set_data("openvino.xml", f.read())
                output_model.precision = self._precision
                output_model.optimization_methods = self._optimization_methods
            except Exception as ex:
                raise RuntimeError('Optimization was unsuccessful.') from ex

        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self._task_environment.label_schema))
        logger.info('Exporting completed.')
Example #5

    def save_model(self, output_model: ModelEntity):
        """
        Save the model after training is completed.
        """
        config = self.get_config()
        model_info = {
            "model": self.model.state_dict(),
            "config": config,
            "VERSION": 1,
        }
        buffer = io.BytesIO()
        torch.save(model_info, buffer)
        output_model.set_data("weights.pth", buffer.getvalue())
        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))
        # store computed threshold
        output_model.set_data(
            "threshold", bytes(struct.pack("f", self.model.threshold.item())))

        f1_score = self.model.image_metrics.OptimalF1.compute().item()
        output_model.performance = Performance(
            score=ScoreMetric(name="F1 Score", value=f1_score))
        output_model.precision = [ModelPrecision.FP32]
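
The threshold is stored as four bytes in the native float format; a round-trip
sketch, assuming `output_model` was populated by save_model above:

    import struct

    (threshold,) = struct.unpack("f", output_model.get_data("threshold"))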
Example #6

    def optimize(self, optimization_type: OptimizationType,
                 dataset: DatasetEntity, output_model: ModelEntity,
                 optimization_parameters: Optional[OptimizationParameters]):

        if optimization_type is not OptimizationType.POT:
            raise ValueError(
                "POT is the only supported optimization type for OpenVINO models"
            )

        data_loader = OTEOpenVinoDataLoader(dataset, self.inferencer)

        with tempfile.TemporaryDirectory() as tempdir:
            xml_path = os.path.join(tempdir, "model.xml")
            bin_path = os.path.join(tempdir, "model.bin")
            with open(xml_path, "wb") as f:
                f.write(self.model.get_data("openvino.xml"))
            with open(bin_path, "wb") as f:
                f.write(self.model.get_data("openvino.bin"))

            model_config = ADDict({
                'model_name': 'openvino_model',
                'model': xml_path,
                'weights': bin_path
            })

            model = load_model(model_config)

            if get_nodes_by_type(model, ["FakeQuantize"]):
                raise RuntimeError("Model is already optimized by POT")

        engine_config = ADDict({'device': 'CPU'})

        stat_subset_size = self.hparams.pot_parameters.stat_subset_size
        preset = self.hparams.pot_parameters.preset.name.lower()

        algorithms = [{
            'name': 'DefaultQuantization',
            'params': {
                'target_device': 'ANY',
                'preset': preset,
                'stat_subset_size': min(stat_subset_size, len(data_loader)),
                'shuffle_data': True
            }
        }]

        engine = IEEngine(config=engine_config,
                          data_loader=data_loader,
                          metric=None)

        pipeline = create_pipeline(algorithms, engine)

        compressed_model = pipeline.run(model)

        compress_model_weights(compressed_model)

        with tempfile.TemporaryDirectory() as tempdir:
            save_model(compressed_model, tempdir, model_name="model")
            with open(os.path.join(tempdir, "model.xml"), "rb") as f:
                output_model.set_data("openvino.xml", f.read())
            with open(os.path.join(tempdir, "model.bin"), "rb") as f:
                output_model.set_data("openvino.bin", f.read())

        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))

        # set model attributes for quantized model
        output_model.model_format = ModelFormat.OPENVINO
        output_model.optimization_type = ModelOptimizationType.POT
        output_model.optimization_methods = [OptimizationMethod.QUANTIZATION]
        output_model.precision = [ModelPrecision.INT8]

        self.model = output_model
        self.inferencer = self.load_inferencer()
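
The pipeline above relies on OTEOpenVinoDataLoader implementing the POT
DataLoader interface. A minimal sketch of such a loader, assuming the pre-2022
`compression.api` package layout that matches the other POT calls in this
snippet (class name and item layout are illustrative):

    from compression.api import DataLoader

    class SketchDataLoader(DataLoader):
        """Feeds calibration images to IEEngine."""

        def __init__(self, config, images):
            super().__init__(config)
            self._images = images

        def __len__(self):
            return len(self._images)

        def __getitem__(self, index):
            # IEEngine consumes ((index, annotation), image) items; the
            # annotation may be None because the snippet passes metric=None.
            return (index, None), self._images[index]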
Example #7

class OTETestNNCFAction(BaseOTETestAction):
    _name = "nncf"
    _depends_stages_names = ["training"]

    def _run_ote_nncf(self, data_collector, model_template, dataset,
                      trained_model, environment):
        logger.debug(
            "Get predictions on the validation set for exported model")
        self.environment_for_nncf = deepcopy(environment)

        logger.info("Create NNCF Task")
        nncf_task_class_impl_path = model_template.entrypoints.nncf
        if not nncf_task_class_impl_path:
            pytest.skip("NNCF is not enabled for this template")

        if not is_nncf_enabled():
            pytest.skip("NNCF is not installed")

        logger.info("Creating NNCF task and structures")
        self.nncf_model = ModelEntity(
            dataset,
            self.environment_for_nncf.get_model_configuration(),
            model_status=ModelStatus.NOT_READY,
        )
        self.nncf_model.set_data("weights.pth",
                                 trained_model.get_data("weights.pth"))

        self.environment_for_nncf.model = self.nncf_model

        nncf_task_cls = get_impl_class(nncf_task_class_impl_path)
        self.nncf_task = nncf_task_cls(
            task_environment=self.environment_for_nncf)

        logger.info("Run NNCF optimization")
        self.nncf_task.optimize(OptimizationType.NNCF, dataset,
                                self.nncf_model, None)
        assert (self.nncf_model.model_status == ModelStatus.SUCCESS
                ), "NNCF optimization was not successful"
        assert (self.nncf_model.optimization_type == ModelOptimizationType.NNCF
                ), "Wrong optimization type"
        assert (self.nncf_model.model_format == ModelFormat.BASE_FRAMEWORK
                ), "Wrong model format"
        logger.info("NNCF optimization is finished")

    def __call__(self, data_collector: DataCollector,
                 results_prev_stages: OrderedDict):
        self._check_result_prev_stages(results_prev_stages,
                                       self.depends_stages_names)

        kwargs = {
            "model_template":
            results_prev_stages["training"]["model_template"],
            "dataset": results_prev_stages["training"]["dataset"],
            "trained_model": results_prev_stages["training"]["output_model"],
            "environment": results_prev_stages["training"]["environment"],
        }

        self._run_ote_nncf(data_collector, **kwargs)
        results = {
            "nncf_task": self.nncf_task,
            "nncf_model": self.nncf_model,
            "nncf_environment": self.environment_for_nncf,
        }
        return results

Example #8

    def optimize(
        self,
        optimization_type: OptimizationType,
        dataset: DatasetEntity,
        output_model: ModelEntity,
        optimization_parameters: Optional[OptimizationParameters],
    ):
        """Optimize the model.

        Args:
            optimization_type (OptimizationType): Type of optimization [POT or NNCF]
            dataset (DatasetEntity): Input Dataset.
            output_model (ModelEntity): Output model.
            optimization_parameters (Optional[OptimizationParameters]): Optimization parameters.

        Raises:
            ValueError: When the optimization type is not POT, which is the only supported type at the moment.
        """
        if optimization_type is not OptimizationType.POT:
            raise ValueError(
                "POT is the only supported optimization type for OpenVINO models"
            )

        data_loader = OTEOpenVINOAnomalyDataloader(config=self.config,
                                                   dataset=dataset,
                                                   inferencer=self.inferencer)

        with tempfile.TemporaryDirectory() as tempdir:
            xml_path = os.path.join(tempdir, "model.xml")
            bin_path = os.path.join(tempdir, "model.bin")

            self.__save_weights(
                xml_path, self.task_environment.model.get_data("openvino.xml"))
            self.__save_weights(
                bin_path, self.task_environment.model.get_data("openvino.bin"))

            model_config = {
                "model_name": "openvino_model",
                "model": xml_path,
                "weights": bin_path,
            }
            model = load_model(model_config)

            if get_nodes_by_type(model, ["FakeQuantize"]):
                logger.warning("Model is already optimized by POT")
                return

        engine = IEEngine(config=ADDict({"device": "CPU"}),
                          data_loader=data_loader,
                          metric=None)
        pipeline = create_pipeline(
            algo_config=self._get_optimization_algorithms_configs(),
            engine=engine)
        compressed_model = pipeline.run(model)
        compress_model_weights(compressed_model)

        with tempfile.TemporaryDirectory() as tempdir:
            save_model(compressed_model, tempdir, model_name="model")
            self.__load_weights(path=os.path.join(tempdir, "model.xml"),
                                output_model=output_model,
                                key="openvino.xml")
            self.__load_weights(path=os.path.join(tempdir, "model.bin"),
                                output_model=output_model,
                                key="openvino.bin")

        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))
        output_model.set_data(
            "threshold", self.task_environment.model.get_data("threshold"))
        output_model.model_status = ModelStatus.SUCCESS
        output_model.model_format = ModelFormat.OPENVINO
        output_model.optimization_type = ModelOptimizationType.POT
        output_model.optimization_methods = [OptimizationMethod.QUANTIZATION]
        output_model.precision = [ModelPrecision.INT8]

        self.task_environment.model = output_model
        self.inferencer = self.load_inferencer()
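
A hypothetical end-to-end usage sketch for the POT path above; `task`,
`dataset`, and `environment` are illustrative names, not part of the snippet:

    output_model = ModelEntity(dataset, environment.get_model_configuration())
    task.optimize(OptimizationType.POT, dataset, output_model, None)
    assert output_model.optimization_type == ModelOptimizationType.POT
    assert output_model.precision == [ModelPrecision.INT8]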
Example #9

    def save_model(self, output_model: ModelEntity):
        """Save the main model together with any auxiliary model snapshots."""
        for name, path in self._aux_model_snap_paths.items():
            with open(path, 'rb') as read_file:
                output_model.set_data(name, read_file.read())
        self._save_model(output_model)

Example #10

    def test_model_entity_model_adapters(self):
        """
        <b>Description:</b>
        Check that ModelEntity correctly returns the adapters

        <b>Expected results:</b>
        Test passes if ModelEntity correctly returns the adapters

        <b>Steps</b>
        1. Create a ModelEntity with adapters
        2. Change data source for an adapter
        3. Remove an adapter
        """

        data_source_0 = b"{0: binaryrepo://localhost/repo/data_source/0}"
        data_source_1 = b"binaryrepo://localhost/repo/data_source/1"
        data_source_2 = b"binaryrepo://localhost/repo/data_source/2"
        data_source_3 = b"binaryrepo://localhost/repo/data_source/3"

        temp_dir = tempfile.TemporaryDirectory()
        temp_file = os.path.join(temp_dir.name, "data_source_0")

        with open(temp_file, "wb") as tmp:
            tmp.write(data_source_0)

        model_adapters = {
            "0": ModelAdapter(data_source=data_source_0),
            "1": ModelAdapter(data_source=data_source_1),
            "2": ModelAdapter(data_source=data_source_2),
        }

        model_entity = ModelEntity(
            train_dataset=self.dataset(),
            configuration=self.configuration(),
            model_adapters=model_adapters,
        )

        assert model_entity.weight_paths == {}

        # Adapter with key 0 not from file
        assert model_entity.model_adapters["0"].from_file_storage is False

        model_entity.set_data("0", temp_file)

        for adapter in model_entity.model_adapters:
            if adapter == "0":
                # Adapter with key 0 from file
                assert model_entity.model_adapters[
                    adapter].from_file_storage is True
            else:
                assert model_entity.model_adapters[
                    adapter].from_file_storage is False

        assert model_entity.get_data("1") == data_source_1

        model_entity.set_data("2", data_source_1)
        assert model_entity.get_data("2") == data_source_1
        assert len(model_entity.model_adapters) == 3

        model_entity.set_data("3", data_source_3)
        assert model_entity.get_data("3") == data_source_3
        assert len(model_entity.model_adapters) == 4

        model_entity.delete_data("3")
        assert len(model_entity.model_adapters) == 3

        # Attempt to retrieve a missing and deleted key
        with pytest.raises(KeyError):
            model_entity.get_data("5")

        with pytest.raises(KeyError):
            model_entity.get_data("3")

        # weight_paths raises AttributeError because of outdated code in
        # ModelEntity that still accesses "model_adapter.data_source.binary_url"
        with pytest.raises(AttributeError):
            assert model_entity.weight_paths == {"0": temp_file}