Example #1
    def test_task_environment(self):
        """
        <b>Description:</b>
        Check that the TaskEnvironment correctly returns its values

        <b>Input data:</b>
        Dummy data

        <b>Expected results:</b>
        Test passes if incoming data is processed correctly

        <b>Steps</b>
        1. Use an already created dummy environment.
        2. Check the class fields.
        """

        env = environment()
        __dummy_config = dummy_config()

        assert env == TaskEnvironment(
            model=None,
            model_template=env.model_template,
            hyper_parameters=env.get_hyper_parameters(),
            label_schema=env.label_schema,
        )
        assert isinstance(env, TaskEnvironment)
        assert env != "Fail params"
        assert env.get_labels() == []

        for i in ["header", "description", "visible_in_ui"]:
            assert (getattr(
                env.get_model_configuration().configurable_parameters,
                i) == __dummy_config[i])

        assert env.get_model_configuration().configurable_parameters.id == ID()

        for param in __dummy_config:
            assert getattr(env.get_hyper_parameters(), param) == __dummy_config[param]

        assert env.get_hyper_parameters().id == ID()

        assert "model=None" in repr(env)
        assert "label_schema=LabelSchemaEntity(label_groups=[LabelGroup(id=" in repr(
            env)
        assert "name=from_label_list" in repr(env)
        assert "group_type=LabelGroupType.EXCLUSIVE" in repr(env)
        assert (
            "labels=[LabelEntity(123456789, name=car, hotkey=ctrl+0, domain=DETECTION"
            in repr(env))
        assert (
            "LabelEntity(987654321, name=person, hotkey=ctrl+0, domain=DETECTION"
            in repr(env))
        assert (
            "CONFIGURABLE_PARAMETERS(header='Configuration for an object detection task -- TEST ONLY'"
            in repr(env))
        assert (
            "description='Configuration for an object detection task -- TEST ONLY'"
            in repr(env))
        assert "visible_in_ui=True" in repr(env)
        assert "id=ID()" in repr(env)
Example #2
def environment():
    """
    Return TaskEnvironment
    """
    car = LabelEntity(id=ID(123456789),
                      name="car",
                      domain=Domain.DETECTION,
                      is_empty=True)
    person = LabelEntity(id=ID(987654321),
                         name="person",
                         domain=Domain.DETECTION,
                         is_empty=True)
    labels_list = [car, person]
    dummy_template = __get_path_to_file("./dummy_template.yaml")
    model_template = parse_model_template(dummy_template)
    hyper_parameters = model_template.hyper_parameters.data
    params = ote_config_helper.create(hyper_parameters)
    labels_schema = LabelSchemaEntity.from_labels(labels_list)
    environment = TaskEnvironment(
        model=None,
        hyper_parameters=params,
        label_schema=labels_schema,
        model_template=model_template,
    )
    return environment
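A minimal usage sketch for the fixture above, assuming it is importable; get_labels(include_empty=...) is used the same way as in the task __init__ examples below.

# Usage sketch; assumes the environment() fixture above is importable.
env = environment()
# Both labels are created with is_empty=True, so the default call returns no
# labels (see the assertion in Example #1); empty labels must be requested explicitly.
print(env.get_labels())                    # []
print(env.get_labels(include_empty=True))  # expected: the "car" and "person" labels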
Example #3
    def __init__(self, task_environment: TaskEnvironment):
        logger.info("Loading OTEClassificationTask.")
        self._scratch_space = tempfile.mkdtemp(prefix="ote-cls-scratch-")
        logger.info(f"Scratch space created at {self._scratch_space}")

        self._task_environment = task_environment
        if len(task_environment.get_labels(False)) == 1:
            self._labels = task_environment.get_labels(include_empty=True)
        else:
            self._labels = task_environment.get_labels(include_empty=False)
        self._empty_label = get_empty_label(task_environment.label_schema)
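        # Multi-label case: more than one label group and every non-empty label has a group of its own.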
        self._multilabel = len(task_environment.label_schema.get_groups(False)) > 1 and \
                len(task_environment.label_schema.get_groups(False)) == \
                len(task_environment.get_labels(include_empty=False))

        self._hierarchical = False
        if not self._multilabel and len(
                task_environment.label_schema.get_groups(False)) > 1:
            self._labels = get_leaf_labels(task_environment.label_schema)
            self._hierarchical = True

        template_file_path = task_environment.model_template.model_template_path

        self._base_dir = os.path.abspath(os.path.dirname(template_file_path))

        self._cfg = get_default_config()
        self._patch_config(self._base_dir)

        if self._multilabel:
            assert self._cfg.model.type == 'multilabel', task_environment.model_template.model_template_path + \
                ' model template does not support multilabel classification'
        else:
            assert self._cfg.model.type == 'classification', task_environment.model_template.model_template_path + \
                ' model template does not support multiclass classification'

        self.device = torch.device(
            "cuda:0") if torch.cuda.device_count() else torch.device("cpu")
        self._model = self._load_model(task_environment.model,
                                       device=self.device)

        self.stop_callback = StopCallback()
        self.metrics_monitor = DefaultMetricsMonitor()

        # Set default model attributes.
        self._optimization_methods = []
        self._precision = [ModelPrecision.FP32]
        self._optimization_type = ModelOptimizationType.MO
Example #4
def main():
    """
    Main function that is used for model exporting.
    """

    args = parse_args()

    # Load template.yaml file.
    template = find_and_parse_model_template(args.template)

    # Get class for Task.
    task_class = get_impl_class(template.entrypoints.base)

    # Get hyper parameters schema.
    hyper_parameters = create(template.hyper_parameters.data)
    assert hyper_parameters

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights),
                         "label_schema.json")),
        model_template=template,
    )

    model_adapters = {
        "weights.pth": ModelAdapter(read_binary(args.load_weights))
    }
    model = ModelEntity(
        configuration=environment.get_model_configuration(),
        model_adapters=model_adapters,
        train_dataset=None,
    )
    environment.model = model

    task = task_class(task_environment=environment)

    exported_model = ModelEntity(None,
                                 environment.get_model_configuration(),
                                 model_status=ModelStatus.NOT_READY)

    task.export(ExportType.OPENVINO, exported_model)

    os.makedirs(args.save_model_to, exist_ok=True)
    save_model_data(exported_model, args.save_model_to)
Example #5
def main():
    """
    Main function that is used for model deployment.
    """

    # Parses input arguments.
    args = parse_args()

    # Reads model template file.
    template = find_and_parse_model_template(args.template)

    # Get hyper parameters schema.
    hyper_parameters = template.hyper_parameters.data
    assert hyper_parameters

    # Get classes for Task, ConfigurableParameters and Dataset.
    if not args.load_weights.endswith(".bin") and not args.load_weights.endswith(
        ".xml"
    ):
        raise RuntimeError("Only OpenVINO-exported models are supported.")

    task_class = get_impl_class(template.entrypoints.openvino)

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=create(hyper_parameters),
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights), "label_schema.json")
        ),
        model_template=template,
    )
    environment.model = read_model(
        environment.get_model_configuration(), args.load_weights, None
    )

    task = task_class(task_environment=environment)

    deployed_model = ModelEntity(None, environment.get_model_configuration())

    os.makedirs(args.save_model_to, exist_ok=True)
    task.deploy(deployed_model)
    with open(os.path.join(args.save_model_to, "openvino.zip"), "wb") as write_file:
        write_file.write(deployed_model.exportable_code)
Example #6
def _create_environment_and_task(params, labels_schema, model_template):
    environment = TaskEnvironment(
        model=None,
        hyper_parameters=params,
        label_schema=labels_schema,
        model_template=model_template,
    )
    logger.info("Create base Task")
    task_impl_path = model_template.entrypoints.base
    task_cls = get_impl_class(task_impl_path)
    task = task_cls(task_environment=environment)
    return environment, task
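A short usage sketch for the helper above; the template path is a placeholder and the surrounding calls follow the pattern of Examples #2 and #4.

# Usage sketch with placeholder inputs; follows the pattern of the other examples.
model_template = parse_model_template("path/to/template.yaml")  # placeholder path
params = create(model_template.hyper_parameters.data)
labels_schema = LabelSchemaEntity.from_labels(labels_list)       # labels_list defined elsewhere
environment, task = _create_environment_and_task(params, labels_schema, model_template)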
Example #7
    def __init__(self, task_environment: TaskEnvironment):
        self.task_environment = task_environment
        self.model_name = task_environment.model_template.name
        self.labels = task_environment.get_labels()

        # Hyperparameters.
        self.project_path: str = tempfile.mkdtemp(prefix="ote-anomalib")
        self.config = self.get_config()

        self.model = self.load_model(ote_model=task_environment.model)

        self.trainer: Trainer
Example #8
def init_environment(params, model_template, number_of_images=10):
    resolution = (224, 224)
    colors = [(0, 255, 0), (0, 0, 255)]
    cls_names = ['b', 'g']
    texts = ['Blue', 'Green']
    env_labels = [
        LabelEntity(name=name,
                    domain=Domain.CLASSIFICATION,
                    is_empty=False,
                    id=ID(i)) for i, name in enumerate(cls_names)
    ]

    items = []

    for _ in range(0, number_of_images):
        for j, lbl in enumerate(env_labels):
            class_img = np.zeros((*resolution, 3), dtype=np.uint8)
            class_img[:] = colors[j]
            class_img = cv.putText(class_img, texts[j], (50, 50),
                                   cv.FONT_HERSHEY_SIMPLEX, .8 + j * .2,
                                   colors[j - 1], 2, cv.LINE_AA)

            image = Image(data=class_img)
            labels = [ScoredLabel(label=lbl, probability=1.0)]
            shapes = [Annotation(Rectangle.generate_full_box(), labels)]
            annotation_scene = AnnotationSceneEntity(
                kind=AnnotationSceneKind.ANNOTATION, annotations=shapes)
            items.append(
                DatasetItemEntity(media=image,
                                  annotation_scene=annotation_scene))

    rng = random.Random()
    rng.seed(100)
    rng.shuffle(items)
    for i, _ in enumerate(items):
        subset_region = i / number_of_images
        if subset_region >= 0.9:
            subset = Subset.TESTING
        elif subset_region >= 0.6:
            subset = Subset.VALIDATION
        else:
            subset = Subset.TRAINING
        items[i].subset = subset

    dataset = DatasetEntity(items)
    labels_schema = generate_label_schema(dataset.get_labels(),
                                          multilabel=False)
    environment = TaskEnvironment(model=None,
                                  hyper_parameters=params,
                                  label_schema=labels_schema,
                                  model_template=model_template)
    return environment, dataset
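A usage sketch for init_environment(), assuming the model template is parsed the same way as in the other examples (the template path is a placeholder):

# Usage sketch with a placeholder template path.
model_template = parse_model_template("path/to/template.yaml")
hyper_parameters = create(model_template.hyper_parameters.data)
environment, dataset = init_environment(hyper_parameters, model_template,
                                        number_of_images=10)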
Example #9
def main():
    """
    Main function that is used for model demonstration.
    """

    # Dynamically create an argument parser based on override parameters.
    args, template, hyper_parameters = parse_args()
    # Get new values from user's input.
    updated_hyper_parameters = gen_params_dict_from_args(args)
    # Override the default parameters with the user-provided values.
    override_parameters(updated_hyper_parameters, hyper_parameters)

    hyper_parameters = create(hyper_parameters)

    # Get classes for Task, ConfigurableParameters and Dataset.
    if args.load_weights.endswith(".bin") or args.load_weights.endswith(
            ".xml"):
        task_class = get_impl_class(template.entrypoints.openvino)
    else:
        task_class = get_impl_class(template.entrypoints.base)

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights),
                         "label_schema.json")),
        model_template=template,
    )

    environment.model = read_model(environment.get_model_configuration(),
                                   args.load_weights, None)

    task = task_class(task_environment=environment)

    capture = open_images_capture(args.input, args.loop)

    elapsed_times = deque(maxlen=10)
    frame_index = 0
    while True:
        frame = capture.read()
        if frame is None:
            break

        predictions, elapsed_time = get_predictions(task, frame)
        elapsed_times.append(elapsed_time)
        elapsed_time = np.mean(elapsed_times)

        frame = draw_predictions(template.task_type, predictions, frame,
                                 args.fit_to_size)
        if args.display_perf:
            put_text_on_rect_bg(
                frame,
                f"time: {elapsed_time:.4f} sec.",
                (0, frame.shape[0] - 30),
                color=(255, 255, 255),
            )

        if args.delay >= 0:
            cv2.imshow("frame", frame)
            if cv2.waitKey(args.delay) == ESC_BUTTON:
                break
        else:
            print(f"{frame_index=}, {elapsed_time=}, {len(predictions)=}")
Example #10
def main():
    """
    Main function that is used for model evaluation.
    """

    # Dynamically create an argument parser based on override parameters.
    args, template, hyper_parameters = parse_args()
    # Get new values from user's input.
    updated_hyper_parameters = gen_params_dict_from_args(args)
    # Override the default parameters with the user-provided values.
    override_parameters(updated_hyper_parameters, hyper_parameters)

    hyper_parameters = create(hyper_parameters)

    # Get classes for Task, ConfigurableParameters and Dataset.
    if args.load_weights.endswith(".bin") or args.load_weights.endswith(".xml"):
        task_class = get_impl_class(template.entrypoints.openvino)
    else:
        task_class = get_impl_class(template.entrypoints.base)

    dataset_class = get_dataset_class(template.task_type)

    dataset = dataset_class(
        test_subset={"ann_file": args.test_ann_files, "data_root": args.test_data_roots}
    )

    dataset_label_schema = generate_label_schema(dataset, template.task_type)
    check_label_schemas(
        read_label_schema(
            os.path.join(os.path.dirname(args.load_weights), "label_schema.json")
        ),
        dataset_label_schema,
    )

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=dataset_label_schema,
        model_template=template,
    )

    model = read_model(environment.get_model_configuration(), args.load_weights, None)
    environment.model = model

    task = task_class(task_environment=environment)

    validation_dataset = dataset.get_subset(Subset.TESTING)
    predicted_validation_dataset = task.infer(
        validation_dataset.with_empty_annotations(),
        InferenceParameters(is_evaluation=True),
    )

    resultset = ResultSetEntity(
        model=model,
        ground_truth_dataset=validation_dataset,
        prediction_dataset=predicted_validation_dataset,
    )
    task.evaluate(resultset)
    assert resultset.performance is not None
    print(resultset.performance)

    if args.save_performance:
        with open(args.save_performance, "w", encoding="UTF-8") as write_file:
            json.dump(
                {resultset.performance.score.name: resultset.performance.score.value},
                write_file,
            )
Example #11
def main():
    """
    Main function that is used for model training.
    """

    # Dynamically create an argument parser based on override parameters.
    args, template, hyper_parameters = parse_args()
    # Get new values from user's input.
    updated_hyper_parameters = gen_params_dict_from_args(args)
    # Override the default parameters with the user-provided values.
    override_parameters(updated_hyper_parameters, hyper_parameters)

    hyper_parameters = create(hyper_parameters)

    # Get classes for Task, ConfigurableParameters and Dataset.
    task_class = get_impl_class(template.entrypoints.base)
    dataset_class = get_dataset_class(template.task_type)

    # Create instances of Task, ConfigurableParameters and Dataset.
    dataset = dataset_class(
        train_subset={
            "ann_file": args.train_ann_files,
            "data_root": args.train_data_roots,
        },
        val_subset={"ann_file": args.val_ann_files, "data_root": args.val_data_roots},
    )

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=generate_label_schema(dataset, template.task_type),
        model_template=template,
    )

    if args.load_weights:
        environment.model = ModelEntity(
            train_dataset=dataset,
            configuration=environment.get_model_configuration(),
            model_adapters={
                "weights.pth": ModelAdapter(read_binary(args.load_weights))
            },
        )

    if args.enable_hpo:
        run_hpo(args, environment, dataset, template.task_type)

    task = task_class(task_environment=environment)

    output_model = ModelEntity(
        dataset,
        environment.get_model_configuration(),
        model_status=ModelStatus.NOT_READY,
    )

    task.train(dataset, output_model, train_parameters=TrainParameters())

    save_model_data(output_model, args.save_model_to)

    validation_dataset = dataset.get_subset(Subset.VALIDATION)
    predicted_validation_dataset = task.infer(
        validation_dataset.with_empty_annotations(),
        InferenceParameters(is_evaluation=True),
    )

    resultset = ResultSetEntity(
        model=output_model,
        ground_truth_dataset=validation_dataset,
        prediction_dataset=predicted_validation_dataset,
    )
    task.evaluate(resultset)
    assert resultset.performance is not None
    print(resultset.performance)
Example #12
    def test_model_entity_sets_values(self):
        """
        <b>Description:</b>
        Check that ModelEntity correctly returns the set values

        <b>Expected results:</b>
        Test passes if ModelEntity correctly returns the set values

        <b>Steps</b>
        1. Check set values in the ModelEntity
        """
        def __get_path_to_file(filename: str):
            """
            Return the path to the file named 'filename', which lives in the tests/entities directory
            """
            return str(Path(__file__).parent / Path(filename))

        car = LabelEntity(name="car", domain=Domain.DETECTION)
        labels_list = [car]
        dummy_template = __get_path_to_file("./dummy_template.yaml")
        model_template = parse_model_template(dummy_template)
        hyper_parameters = model_template.hyper_parameters.data
        params = ote_config_helper.create(hyper_parameters)
        labels_schema = LabelSchemaEntity.from_labels(labels_list)
        environment = TaskEnvironment(
            model=None,
            hyper_parameters=params,
            label_schema=labels_schema,
            model_template=model_template,
        )

        item = self.generate_random_image()
        dataset = DatasetEntity(items=[item])
        score_metric = ScoreMetric(name="Model accuracy", value=0.5)

        model_entity = ModelEntity(train_dataset=self.dataset(),
                                   configuration=self.configuration())

        set_params = {
            "configuration": environment.get_model_configuration(),
            "train_dataset": dataset,
            "id": ID(1234567890),
            "creation_date": self.creation_date,
            "previous_trained_revision": 5,
            "previous_revision": 2,
            "version": 2,
            "tags": ["tree", "person"],
            "model_status": ModelStatus.TRAINED_NO_STATS,
            "model_format": ModelFormat.BASE_FRAMEWORK,
            "performance": Performance(score_metric),
            "training_duration": 5.8,
            "precision": [ModelPrecision.INT8],
            "latency": 328,
            "fps_throughput": 20,
            "target_device": TargetDevice.GPU,
            "target_device_type": "notebook",
            "optimization_methods": [OptimizationMethod.QUANTIZATION],
            "optimization_type": ModelOptimizationType.MO,
            "optimization_objectives": {
                "param": "Test param"
            },
            "performance_improvement": {"speed", 0.5},
            "model_size_reduction": 1.0,
        }

        for key, value in set_params.items():
            setattr(model_entity, key, value)
            assert getattr(model_entity, key) == value

        assert model_entity.is_optimized() is True
Example #13
def run_hpo_trainer(
    hp_config,
    model,
    hyper_parameters,
    model_template,
    dataset_paths,
    task_type,
):
    """Run each training of each trial with given hyper parameters"""

    if isinstance(hyper_parameters, dict):
        current_params = {}
        for val in hyper_parameters["parameters"]:
            current_params[val] = hyper_parameters[val]
        hyper_parameters = create(model_template.hyper_parameters.data)
        HpoManager.set_hyperparameter(hyper_parameters, current_params)

    if dataset_paths is None:
        raise ValueError("Dataset is not defined.")

    impl_class = get_dataset_class(task_type)
    dataset = impl_class(
        train_subset={
            "ann_file": dataset_paths.get("train_ann_file", None),
            "data_root": dataset_paths.get("train_data_root", None),
        },
        val_subset={
            "ann_file": dataset_paths.get("val_ann_file", None),
            "data_root": dataset_paths.get("val_data_root", None),
        },
    )

    train_env = TaskEnvironment(
        model=model,
        hyper_parameters=hyper_parameters,
        label_schema=generate_label_schema(dataset, task_type),
        model_template=model_template,
    )

    hyper_parameters = train_env.get_hyper_parameters()

    # set epoch
    if task_type == TaskType.CLASSIFICATION:
        hyper_parameters.learning_parameters.max_num_epochs = hp_config["iterations"]
    elif task_type in (TaskType.DETECTION, TaskType.SEGMENTATION):
        hyper_parameters.learning_parameters.num_iters = hp_config[
            "iterations"]

    # set hyper-parameters and print them
    HpoManager.set_hyperparameter(hyper_parameters, hp_config["params"])
    print(f"hyper parameter of current trial : {hp_config['params']}")

    train_env.set_hyper_parameters(hyper_parameters)
    train_env.model_template.hpo = {
        "hp_config": hp_config,
        "metric": hp_config["metric"],
    }

    impl_class = get_impl_class(train_env.model_template.entrypoints.base)
    task = impl_class(task_environment=train_env)

    dataset = HpoDataset(dataset, hp_config)
    if train_env.model:
        train_env.model.train_dataset = dataset
        train_env.model.configuration.configurable_parameters = hyper_parameters

    output_model = ModelEntity(
        dataset,
        train_env.get_model_configuration(),
        model_status=ModelStatus.NOT_READY,
    )

    # make callback to report score to hpopt every epoch
    train_param = TrainParameters(
        False, HpoCallback(hp_config, hp_config["metric"], task), None)
    train_param.train_on_empty_model = None

    task.train(dataset=dataset,
               output_model=output_model,
               train_parameters=train_param)

    hpopt.finalize_trial(hp_config)
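run_hpo_trainer() reads the "iterations", "metric" and "params" keys from hp_config; a hypothetical example of such a dict is sketched below (the values are placeholders, and the real hp_config produced by the HPO manager may carry additional keys).

# Hypothetical hp_config; only the keys read directly in run_hpo_trainer() are
# shown, and all values are placeholders.
hp_config = {
    "iterations": 10,
    "metric": "accuracy",
    "params": {"learning_parameters.learning_rate": 0.01},
}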