Example no. 1
    def test_dataset_entity_purpose_setter(self):
        """
        <b>Description:</b>
        Check DatasetEntity class "purpose" setter

        <b>Input data:</b>
        DatasetEntity class objects with specified "items" and "purpose" parameters

        <b>Expected results:</b>
        Test passes if assigned value of "purpose" property is equal to expected

        <b>Steps</b>
        1. Check value returned by "purpose" property after using @purpose.setter for DatasetEntity with
        default optional parameters
        2. Check value returned by "purpose" property after using @purpose.setter for DatasetEntity initialized with
        specified optional parameters
        """
        # Checking "purpose" property after using @purpose.setter for DatasetEntity with default optional parameters
        default_parameters_dataset = DatasetEntity()
        expected_purpose = DatasetPurpose.TRAINING
        default_parameters_dataset.purpose = expected_purpose
        assert default_parameters_dataset.purpose == expected_purpose
        # Checking "purpose" property after using @purpose.setter for DatasetEntity with specified optional parameters
        optional_parameters_dataset = self.dataset()
        expected_purpose = DatasetPurpose.TASK_INFERENCE
        optional_parameters_dataset.purpose = expected_purpose
        assert optional_parameters_dataset.purpose == expected_purpose
Example no. 2
    def test_dataset_entity_initialization(self):
        """
        <b>Description:</b>
        Check DatasetEntity class object initialization

        <b>Input data:</b>
        DatasetEntity class objects with specified "items" and "purpose" parameters

        <b>Expected results:</b>
        Test passes if "items" and "purpose" attributes of DatasetEntity object are equal to expected values

        <b>Steps</b>
        1. Check attributes of DatasetEntity object initialized with default optional parameters
        2. Check attributes of DatasetEntity object initialized with specified optional parameters
        """
        # Checking attributes of DatasetEntity object initialized with default optional parameters
        default_parameters_dataset = DatasetEntity()
        assert default_parameters_dataset._items == []
        assert default_parameters_dataset.purpose == DatasetPurpose.INFERENCE
        # Checking attributes of DatasetEntity object initialized with specified optional parameters
        items = [
            self.default_values_dataset_item(),
            self.dataset_item(),
            self.dataset_item(),
        ]
        purpose = DatasetPurpose.TEMPORARY_DATASET
        optional_parameters_dataset = DatasetEntity(items, purpose)
        assert optional_parameters_dataset._items == items
        assert optional_parameters_dataset.purpose == purpose
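Example no. 3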
def init_environment(params, model_template, number_of_images=10):
    resolution = (224, 224)
    colors = [(0, 255, 0), (0, 0, 255)]
    cls_names = ['b', 'g']
    texts = ['Blue', 'Green']
    env_labels = [
        LabelEntity(name=name,
                    domain=Domain.CLASSIFICATION,
                    is_empty=False,
                    id=ID(i)) for i, name in enumerate(cls_names)
    ]

    items = []

    for _ in range(0, number_of_images):
        for j, lbl in enumerate(env_labels):
            class_img = np.zeros((*resolution, 3), dtype=np.uint8)
            class_img[:] = colors[j]
            class_img = cv.putText(class_img, texts[j], (50, 50),
                                   cv.FONT_HERSHEY_SIMPLEX, .8 + j * .2,
                                   colors[j - 1], 2, cv.LINE_AA)

            image = Image(data=class_img)
            labels = [ScoredLabel(label=lbl, probability=1.0)]
            shapes = [Annotation(Rectangle.generate_full_box(), labels)]
            annotation_scene = AnnotationSceneEntity(
                kind=AnnotationSceneKind.ANNOTATION, annotations=shapes)
            items.append(
                DatasetItemEntity(media=image,
                                  annotation_scene=annotation_scene))

    rng = random.Random()
    rng.seed(100)
    rng.shuffle(items)
    for i, _ in enumerate(items):
        subset_region = i / number_of_images
        if subset_region >= 0.9:
            subset = Subset.TESTING
        elif subset_region >= 0.6:
            subset = Subset.VALIDATION
        else:
            subset = Subset.TRAINING
        items[i].subset = subset

    dataset = DatasetEntity(items)
    labels_schema = generate_label_schema(dataset.get_labels(),
                                          multilabel=False)
    environment = TaskEnvironment(model=None,
                                  hyper_parameters=params,
                                  label_schema=labels_schema,
                                  model_template=model_template)
    return environment, dataset
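For context, a minimal usage sketch of init_environment, assuming the dummy model template YAML and the parse_model_template / ote_config_helper.create helpers used later in this collection (see test_model_entity_sets_values) are available; none of this is part of the snippet above:

# Hypothetical driver for init_environment; the template path is an assumption.
model_template = parse_model_template("./dummy_template.yaml")
hyper_parameters = model_template.hyper_parameters.data
params = ote_config_helper.create(hyper_parameters)
environment, dataset = init_environment(params, model_template, number_of_images=10)
assert len(dataset) == 20  # 10 images per class x 2 classes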
Example no. 4
    def test_evaluate_interface(self):
        """
        <b>Description:</b>
        Check IEvaluationTask class object initialization

        <b>Input data:</b>
        IEvaluationTask object

        <b>Expected results:</b>
        Test passes if IEvaluationTask object evaluate method raises NotImplementedError exception
        """
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        with pytest.raises(NotImplementedError):
            IEvaluationTask().evaluate(
                ResultSetEntity(
                    model=model_entity,
                    ground_truth_dataset=dataset,
                    prediction_dataset=dataset,
                ))
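Example no. 5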
    def generate(self) -> DatasetEntity:
        """
        Generate OTE Anomaly Dataset

        Returns:
            DatasetEntity: Output OTE Anomaly Dataset built from an MVTec dataset
        """
        samples = self.get_samples()
        dataset_items: List[DatasetItemEntity] = []
        for _, sample in tqdm(samples.iterrows()):
            # Create image
            image = Image(file_path=sample.image_path)

            # Create annotation
            shape = Rectangle(x1=0, y1=0, x2=1, y2=1)
            labels = [ScoredLabel(sample.label)]
            annotations = [Annotation(shape=shape, labels=labels)]
            annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)

            # Create dataset item
            dataset_item = DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=sample.subset)

            # Add to dataset items
            dataset_items.append(dataset_item)

        dataset = DatasetEntity(items=dataset_items)
        return dataset
Example no. 6
    def test_training_interface(self):
        """
        <b>Description:</b>
        Check ITrainingTask class object initialization

        <b>Input data:</b>
        ITrainingTask object

        <b>Expected results:</b>
        Test passes if ITrainingTask object methods raise NotImplementedError exception
        """
        i_training_task = ITrainingTask()
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        train_parameters = TrainParameters()

        with pytest.raises(NotImplementedError):
            i_training_task.save_model(model_entity)
        with pytest.raises(NotImplementedError):
            i_training_task.train(
                dataset=dataset,
                output_model=model_entity,
                train_parameters=train_parameters,
            )
        with pytest.raises(NotImplementedError):
            i_training_task.cancel_training()
Example no. 7
    def test_optimization_interface(self):
        """
        <b>Description:</b>
        Check IOptimizationTask class object initialization

        <b>Input data:</b>
        IOptimizationTask object

        <b>Expected results:</b>
        Test passes if IOptimizationTask object optimize method raises NotImplementedError exception
        """
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        optimization_parameters = OptimizationParameters()
        with pytest.raises(NotImplementedError):
            IOptimizationTask().optimize(
                optimization_type=OptimizationType.POT,
                dataset=dataset,
                output_model=model_entity,
                optimization_parameters=optimization_parameters,
            )
Example no. 8
    def test_dataset_entity_eq(self):
        """
        <b>Description:</b>
        Check DatasetEntity class "__eq__" method

        <b>Input data:</b>
        DatasetEntity class objects with specified "items" and "purpose" parameters

        <b>Expected results:</b>
        Test passes if value returned by "__eq__" method is equal to expected

        <b>Steps</b>
        1. Check value returned by "__eq__" method for equal DatasetEntity objects
        2. Check value returned by "__eq__" method for DatasetEntity objects with unequal length
        3. Check value returned by "__eq__" method for DatasetEntity objects with equal length, but unequal
        DatasetItem objects
        4. Check value returned by "__eq__" method for DatasetEntity objects with unequal "purpose" attributes
        5. Check value returned by "__eq__" method for comparing DatasetEntity object with object of different type
        """
        # Checking value returned by "__eq__" method for equal DatasetEntity objects
        items = [
            self.default_values_dataset_item(),
            self.dataset_item(),
            self.dataset_item(),
        ]
        purpose = DatasetPurpose.TEMPORARY_DATASET
        dataset = DatasetEntity(items, purpose)
        equal_dataset = DatasetEntity(items, purpose)
        assert dataset == equal_dataset
        # Checking value returned by "__eq__" method for DatasetEntity objects with unequal length
        unequal_items = list(items)
        unequal_items.pop(-1)
        unequal_dataset = DatasetEntity(unequal_items, purpose)
        assert dataset != unequal_dataset
        # Checking value returned by "__eq__" method for DatasetEntity objects with equal length, but unequal
        # DatasetItem objects
        unequal_items.append(self.dataset_item())
        unequal_dataset = DatasetEntity(unequal_items, purpose)
        assert dataset != unequal_dataset
        # Checking value returned by "__eq__" method for DatasetEntity objects with unequal "purpose" attributes
        unequal_dataset = DatasetEntity(items, DatasetPurpose.EVALUATION)
        assert dataset != unequal_dataset
        # Checking value returned by "__eq__" method for comparing DatasetEntity object with object of different type
        assert dataset != str
Example no. 9
    def dataset(self) -> DatasetEntity:
        other_dataset_item = DatasetItemEntity(
            media=self.generate_random_image(),
            annotation_scene=self.annotations_entity(),
            metadata=self.metadata(),
            subset=Subset.VALIDATION,
        )
        items = [
            self.default_values_dataset_item(),
            self.dataset_item(),
            other_dataset_item,
        ]
        return DatasetEntity(items, DatasetPurpose.TEMPORARY_DATASET)
Example no. 10
    def metadata_item_with_model() -> MetadataItemEntity:
        data = TensorEntity(
            name="appended_metadata_with_model",
            numpy=np.random.randint(low=0, high=255, size=(10, 15, 3)),
        )
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model = ModelEntity(configuration=configuration,
                            train_dataset=DatasetEntity())
        metadata_item_with_model = MetadataItemEntity(data=data, model=model)
        return metadata_item_with_model
Example no. 11
    def test_i_inference_task(self):
        """
        <b>Description:</b>
        Check IInferenceTask class object initialization

        <b>Input data:</b>
        IInferenceTask object

        <b>Expected results:</b>
        Test passes if IInferenceTask object infer method raises NotImplementedError exception
        """
        dataset = DatasetEntity()
        inference_parameters = InferenceParameters()
        with pytest.raises(NotImplementedError):
            IInferenceTask().infer(dataset=dataset,
                                   inference_parameters=inference_parameters)
Example no. 12
    def test_dataset_entity_add(self):
        """
        <b>Description:</b>
        Check DatasetEntity class "__add__" method

        <b>Input data:</b>
        DatasetEntity class object with specified "items" and "purpose" parameters

        <b>Expected results:</b>
        Test passes if DatasetEntity object returned by "__add__" method is equal to expected

        <b>Steps</b>
        1. Check DatasetEntity object returned by "__add__" method with DatasetEntity specified as "other" parameter
        2. Check DatasetEntity object returned by "__add__" method with list of DatasetItemEntity objects specified
        as "other" parameter
        3. Check ValueError exception is raised when unexpected type object is specified in "other" parameter of
        "__add__" method
        """
        dataset = self.dataset()
        dataset_items = list(dataset._items)
        # Checking DatasetEntity object returned by "__add__" method with DatasetEntity specified as "other" parameter
        other_dataset_items = [self.dataset_item(), self.dataset_item()]
        other_dataset = DatasetEntity(other_dataset_items,
                                      DatasetPurpose.TRAINING)
        new_dataset = dataset.__add__(other_dataset)
        assert new_dataset._items == dataset_items + other_dataset_items
        assert new_dataset.purpose == DatasetPurpose.TEMPORARY_DATASET
        # Checking DatasetEntity object returned by "__add__" method with list of DatasetItemEntity objects specified
        # as "other" parameter
        items_to_add = [
            self.dataset_item(),
            self.dataset_item(),
            "unexpected type object",
        ]
        new_dataset = dataset.__add__(items_to_add)
        # Expected that str object will not be added to new_dataset._items
        assert new_dataset._items == dataset_items + items_to_add[0:2]
        assert new_dataset.purpose == DatasetPurpose.TEMPORARY_DATASET
        # Checking ValueError exception is raised when unexpected type object is specified in "other" parameter of
        # "__add__" method
        with pytest.raises(ValueError):
            dataset.__add__(str)
Example no. 13
    def test_dataset_entity_len(self):
        """
        <b>Description:</b>
        Check DatasetEntity class "__len__" method

        <b>Input data:</b>
        DatasetEntity class objects with specified "items" and "purpose" parameters

        <b>Expected results:</b>
        Test passes if value returned by "__len__" method is equal to expected

        <b>Steps</b>
        1. Check value returned by "__len__" method for DatasetEntity with default optional parameters
        2. Check value returned by "__len__" method for DatasetEntity with specified optional parameters
        """
        # Checking value returned by "__len__" method for DatasetEntity with default optional parameters
        default_parameters_dataset = DatasetEntity()
        assert len(default_parameters_dataset) == 0
        # Checking value returned by "__len__" method for DatasetEntity with specified optional parameters
        optional_parameters_dataset = self.dataset()
        assert len(optional_parameters_dataset) == 3
Example no. 14
    def test_export_interface(self):
        """
        <b>Description:</b>
        Check IExportTask class object initialization

        <b>Input data:</b>
        IExportTask object

        <b>Expected results:</b>
        Test passes if IExportTask object export method raises NotImplementedError exception
        """
        dataset = DatasetEntity()
        configuration = ModelConfiguration(
            configurable_parameters=ConfigurableParameters(
                header="Test Header"),
            label_schema=LabelSchemaEntity(),
        )
        model_entity = ModelEntity(configuration=configuration,
                                   train_dataset=dataset)
        with pytest.raises(NotImplementedError):
            IExportTask().export(export_type=ExportType.OPENVINO,
                                 output_model=model_entity)
Example no. 15
def get_predictions(task, frame):
    """
    Returns the list of predictions made by the task on the frame and the time spent on the prediction.
    """

    empty_annotation = AnnotationSceneEntity(
        annotations=[], kind=AnnotationSceneKind.PREDICTION)

    item = DatasetItemEntity(
        media=Image(frame),
        annotation_scene=empty_annotation,
    )

    dataset = DatasetEntity(items=[item])

    start_time = time.perf_counter()
    predicted_validation_dataset = task.infer(
        dataset,
        InferenceParameters(is_evaluation=True),
    )
    elapsed_time = time.perf_counter() - start_time
    item = predicted_validation_dataset[0]
    return item.get_annotations(), elapsed_time
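A hypothetical call site for get_predictions, assuming task is an IInferenceTask implementation (see test_i_inference_task above) and that the video path is purely illustrative:

# Hypothetical usage; "demo.mp4" and the task object are assumptions.
import cv2 as cv

capture = cv.VideoCapture("demo.mp4")
ok, frame = capture.read()
if ok:
    annotations, elapsed_time = get_predictions(task, frame)
    print(f"{len(annotations)} predictions in {elapsed_time:.3f} s")
capture.release()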
Example no. 16
    def test_dataset_entity_str(self):
        """
        <b>Description:</b>
        Check DatasetEntity class "__str__" method

        <b>Input data:</b>
        DatasetEntity class objects with specified "items" and "purpose" parameters

        <b>Expected results:</b>
        Test passes if value returned by "__str__" method is equal to expected

        <b>Steps</b>
        1. Check value returned by "__str__" method for DatasetEntity with default optional parameters
        2. Check value returned by "__str__" method for DatasetEntity with specified optional parameters
        """
        # Checking value returned by "__str__" method for DatasetEntity with default optional parameters
        default_parameters_dataset = DatasetEntity()
        assert (str(default_parameters_dataset) ==
                "DatasetEntity(size=0, purpose=INFERENCE)")
        # Checking value returned by "__str__" method for DatasetEntity with specified optional parameters
        optional_parameters_dataset = self.dataset()
        assert (str(optional_parameters_dataset) ==
                "DatasetEntity(size=3, purpose=TEMPORARY_DATASET)")
    def optimize(
        self,
        optimization_type: OptimizationType,
        dataset: DatasetEntity,
        output_model: ModelEntity,
        optimization_parameters: Optional[OptimizationParameters],
    ):
        """ Optimize a model on a dataset """
        if optimization_type is not OptimizationType.NNCF:
            raise RuntimeError('NNCF is the only supported optimization')
        if self._compression_ctrl:
            raise RuntimeError('The model is already optimized. NNCF requires the original model for optimization.')
        if self._cfg.train.ema.enable:
            raise RuntimeError('EMA model could not be used together with NNCF compression')
        if self._cfg.lr_finder.enable:
            raise RuntimeError('LR finder could not be used together with NNCF compression')

        aux_pretrained_dicts = self._load_aux_models_data(self._task_environment.model)
        num_aux_models = len(self._cfg.mutual_learning.aux_configs)
        num_aux_pretrained_dicts = len(aux_pretrained_dicts)
        if num_aux_models != num_aux_pretrained_dicts:
            raise RuntimeError('The pretrained weights are not provided for all aux models.')

        if optimization_parameters is not None:
            update_progress_callback = optimization_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(update_progress_callback, num_epoch=self._cfg.train.max_epoch,
                                                num_train_steps=math.ceil(len(dataset.get_subset(Subset.TRAINING)) /
                                                                          self._cfg.train.batch_size),
                                                num_val_steps=0, num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [OTEClassificationDataset(train_subset, self._labels, self._multilabel,
                                                                    keep_empty_label=self._empty_label in self._labels),
                                           OTEClassificationDataset(val_subset, self._labels, self._multilabel,
                                                                    keep_empty_label=self._empty_label in self._labels)]
        datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(self._cfg))

        self._compression_ctrl, self._model, self._nncf_metainfo = \
            wrap_nncf_model(self._model, self._cfg, datamanager_for_init=datamanager)

        self._cfg.train.lr = calculate_lr_for_nncf_training(self._cfg, self._initial_lr, False)

        train_model = self._model
        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model, device_ids=main_device_ids,
                                       output_device=0).cuda(main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(train_model, **optimizer_kwargs(self._cfg))

        scheduler = torchreid.optim.build_lr_scheduler(optimizer, num_iter=datamanager.num_iter,
                                                       **lr_scheduler_kwargs(self._cfg))

        logger.info('Start training')
        run_training(self._cfg, datamanager, train_model, optimizer,
                     scheduler, extra_device_ids, self._cfg.train.lr,
                     should_freeze_aux_models=True,
                     aux_pretrained_dicts=aux_pretrained_dicts,
                     tb_writer=self.metrics_monitor,
                     perf_monitor=time_monitor,
                     stop_callback=self.stop_callback,
                     nncf_metainfo=self._nncf_metainfo,
                     compression_ctrl=self._compression_ctrl)

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info('Training completed')

        self.save_model(output_model)

        output_model.model_format = ModelFormat.BASE_FRAMEWORK
        output_model.optimization_type = self._optimization_type
        output_model.optimization_methods = self._optimization_methods
        output_model.precision = self._precision
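For reference, a minimal sketch of how this optimize method might be driven, assuming task is an instance of the NNCF-capable task class above and that environment and dataset come from a helper such as init_environment; the output-model wiring mirrors test_optimization_interface:

# Hypothetical invocation; task, environment and dataset are assumed to exist.
output_model = ModelEntity(
    configuration=environment.get_model_configuration(),
    train_dataset=dataset,
)
task.optimize(
    optimization_type=OptimizationType.NNCF,  # the only type this method accepts
    dataset=dataset,
    output_model=output_model,
    optimization_parameters=OptimizationParameters(),
)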
Example no. 18
    def train(self,
              dataset: DatasetEntity,
              output_model: ModelEntity,
              train_parameters: Optional[TrainParameters] = None):
        """ Trains a model on a dataset """

        train_model = deepcopy(self._model)

        if train_parameters is not None:
            update_progress_callback = train_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(
            update_progress_callback,
            num_epoch=self._cfg.train.max_epoch,
            num_train_steps=math.ceil(
                len(dataset.get_subset(Subset.TRAINING)) /
                self._cfg.train.batch_size),
            num_val_steps=0,
            num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [
            OTEClassificationDataset(train_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels),
            OTEClassificationDataset(val_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels)
        ]
        datamanager = torchreid.data.ImageDataManager(
            **imagedata_kwargs(self._cfg))

        num_aux_models = len(self._cfg.mutual_learning.aux_configs)

        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model,
                                       device_ids=main_device_ids,
                                       output_device=0).cuda(
                                           main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(
            train_model, **optimizer_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            scheduler = None
        else:
            scheduler = torchreid.optim.build_lr_scheduler(
                optimizer,
                num_iter=datamanager.num_iter,
                **lr_scheduler_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            _, train_model, optimizer, scheduler = \
                        run_lr_finder(self._cfg, datamanager, train_model, optimizer, scheduler, None,
                                      rebuild_model=False, gpu_num=self.num_devices, split_models=False)

        _, final_acc = run_training(self._cfg,
                                    datamanager,
                                    train_model,
                                    optimizer,
                                    scheduler,
                                    extra_device_ids,
                                    self._cfg.train.lr,
                                    tb_writer=self.metrics_monitor,
                                    perf_monitor=time_monitor,
                                    stop_callback=self.stop_callback)

        training_metrics = self._generate_training_metrics_group()

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info("Training finished.")

        best_snap_path = os.path.join(self._scratch_space, 'best.pth')
        if os.path.isfile(best_snap_path):
            load_pretrained_weights(self._model, best_snap_path)

        for filename in os.listdir(self._scratch_space):
            match = re.match(r'best_(aux_model_[0-9]+\.pth)', filename)
            if match:
                aux_model_name = match.group(1)
                best_aux_snap_path = os.path.join(self._scratch_space,
                                                  filename)
                self._aux_model_snap_paths[aux_model_name] = best_aux_snap_path

        self.save_model(output_model)
        performance = Performance(score=ScoreMetric(value=final_acc,
                                                    name="accuracy"),
                                  dashboard_metrics=training_metrics)
        logger.info(f'FINAL MODEL PERFORMANCE {performance}')
        output_model.performance = performance
    def dataset(self):
        return DatasetEntity(items=[self.generate_random_image()])
    def test_model_entity_sets_values(self):
        """
        <b>Description:</b>
        Check that ModelEntity correctly returns the set values

        <b>Expected results:</b>
        Test passes if ModelEntity correctly returns the set values

        <b>Steps</b>
        1. Check set values in the ModelEntity
        """
        def __get_path_to_file(filename: str):
            """
            Return the path to the file named 'filename', which lives in the tests/entities directory
            """
            return str(Path(__file__).parent / Path(filename))

        car = LabelEntity(name="car", domain=Domain.DETECTION)
        labels_list = [car]
        dummy_template = __get_path_to_file("./dummy_template.yaml")
        model_template = parse_model_template(dummy_template)
        hyper_parameters = model_template.hyper_parameters.data
        params = ote_config_helper.create(hyper_parameters)
        labels_schema = LabelSchemaEntity.from_labels(labels_list)
        environment = TaskEnvironment(
            model=None,
            hyper_parameters=params,
            label_schema=labels_schema,
            model_template=model_template,
        )

        item = self.generate_random_image()
        dataset = DatasetEntity(items=[item])
        score_metric = ScoreMetric(name="Model accuracy", value=0.5)

        model_entity = ModelEntity(train_dataset=self.dataset(),
                                   configuration=self.configuration())

        set_params = {
            "configuration": environment.get_model_configuration(),
            "train_dataset": dataset,
            "id": ID(1234567890),
            "creation_date": self.creation_date,
            "previous_trained_revision": 5,
            "previous_revision": 2,
            "version": 2,
            "tags": ["tree", "person"],
            "model_status": ModelStatus.TRAINED_NO_STATS,
            "model_format": ModelFormat.BASE_FRAMEWORK,
            "performance": Performance(score_metric),
            "training_duration": 5.8,
            "precision": [ModelPrecision.INT8],
            "latency": 328,
            "fps_throughput": 20,
            "target_device": TargetDevice.GPU,
            "target_device_type": "notebook",
            "optimization_methods": [OptimizationMethod.QUANTIZATION],
            "optimization_type": ModelOptimizationType.MO,
            "optimization_objectives": {
                "param": "Test param"
            },
            "performance_improvement": {"speed", 0.5},
            "model_size_reduction": 1.0,
        }

        for key, value in set_params.items():
            setattr(model_entity, key, value)
            assert getattr(model_entity, key) == value

        assert model_entity.is_optimized() is True