def test_configuration_yaml():
    configuration = OTEClassificationParameters()
    configuration_yaml_str = convert(configuration, str)
    configuration_yaml_converted = create(configuration_yaml_str)
    configuration_yaml_loaded = create(
        osp.join('torchreid', 'integration', 'sc', 'configuration.yaml'))
    assert configuration_yaml_converted == configuration_yaml_loaded
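
A minimal round-trip sketch, assuming the same convert/create helpers used above; the snippet below is illustrative and not part of the original test.

# Hypothetical round-trip check mirroring the test above.
params = OTEClassificationParameters()
yaml_str = convert(params, str)                    # configurable parameters -> YAML string
rebuilt = create(yaml_str)                         # YAML string -> configurable parameters
assert rebuilt == create(convert(rebuilt, str))    # the YAML round-trip is stable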
Example #2
def test_configuration_yaml(configurable_parameters, model_name):
    # assert that we can parse the template.yaml
    template_file_path = os.path.join("anomaly_classification", "configs", model_name, "template.yaml")
    configuration_yaml_loaded, task_name = get_config_and_task_name(template_file_path)

    configuration = configurable_parameters()
    # assert that we can convert our config object to yaml format
    configuration_yaml_str = convert(configuration, str)
    # assert that we can create configurable parameters from the yaml string
    configuration_yaml_converted = create(configuration_yaml_str)
    # assert that we can generate an anomalib config from the converted configurable parameters
    get_anomalib_config(task_name, configuration_yaml_converted)
    # assert that the python class and the yaml file result in the same configurable parameters object
    assert configuration_yaml_converted == configuration_yaml_loaded
Example #3
def get_config_and_task_name(
        template_file_path: str) -> Tuple[ConfigurableParameters, str]:
    """Return configurable parameters and model name given template path

    Args:
        template_file_path (str): template path

    Returns:
        Tuple[ConfigurableParameters, str]: Configurable parameters, model name
    """
    model_template: ModelTemplate = parse_model_template(template_file_path)
    hyper_parameters: dict = model_template.hyper_parameters.data
    config: ConfigurableParameters = create(hyper_parameters)
    return config, model_template.name
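
A short usage sketch for the helper above; the padim directory is an assumption, and get_anomalib_config is the same helper exercised in Example #2.

# Hypothetical call; the template path below is illustrative only.
template_path = os.path.join("anomaly_classification", "configs", "padim", "template.yaml")
config, task_name = get_config_and_task_name(template_path)
anomalib_config = get_anomalib_config(task_name, config)   # as in Example #2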
Example #4
def main():
    """
    Main function that is used for model exporting.
    """

    args = parse_args()

    # Load template.yaml file.
    template = find_and_parse_model_template(args.template)

    # Get class for Task.
    task_class = get_impl_class(template.entrypoints.base)

    # Get hyper parameters schema.
    hyper_parameters = create(template.hyper_parameters.data)
    assert hyper_parameters

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights),
                         "label_schema.json")),
        model_template=template,
    )

    model_adapters = {
        "weights.pth": ModelAdapter(read_binary(args.load_weights))
    }
    model = ModelEntity(
        configuration=environment.get_model_configuration(),
        model_adapters=model_adapters,
        train_dataset=None,
    )
    environment.model = model

    task = task_class(task_environment=environment)

    exported_model = ModelEntity(None,
                                 environment.get_model_configuration(),
                                 model_status=ModelStatus.NOT_READY)

    task.export(ExportType.OPENVINO, exported_model)

    os.makedirs(args.save_model_to, exist_ok=True)
    save_model_data(exported_model, args.save_model_to)
Example #5
def main():
    """
    Main function that is used for model deployment.
    """

    # Parses input arguments.
    args = parse_args()

    # Reads model template file.
    template = find_and_parse_model_template(args.template)

    # Get hyper parameters schema.
    hyper_parameters = template.hyper_parameters.data
    assert hyper_parameters

    # Check that an OpenVINO model was provided and get the class for the OpenVINO task.
    if not args.load_weights.endswith(".bin") and not args.load_weights.endswith(
        ".xml"
    ):
        raise RuntimeError("Only OpenVINO-exported models are supported.")

    task_class = get_impl_class(template.entrypoints.openvino)

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=create(hyper_parameters),
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights), "label_schema.json")
        ),
        model_template=template,
    )
    environment.model = read_model(
        environment.get_model_configuration(), args.load_weights, None
    )

    task = task_class(task_environment=environment)

    deployed_model = ModelEntity(None, environment.get_model_configuration())

    os.makedirs(args.save_model_to, exist_ok=True)
    task.deploy(deployed_model)
    with open(os.path.join(args.save_model_to, "openvino.zip"), "wb") as write_file:
        write_file.write(deployed_model.exportable_code)
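
A small follow-up sanity check, assuming the standard-library zipfile module; save_dir stands in for whatever was passed as args.save_model_to.

# Hypothetical check that the archive written above is a valid zip file.
import zipfile
assert zipfile.is_zipfile(os.path.join(save_dir, "openvino.zip"))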
Example #6
def setup_configurable_parameters(template_dir, max_num_epochs=10):
    model_template = parse_model_template(
        osp.join(template_dir, 'template.yaml'))
    hyper_parameters = create(model_template.hyper_parameters.data)
    hyper_parameters.learning_parameters.max_num_epochs = max_num_epochs
    return hyper_parameters, model_template
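
A brief usage sketch, assuming template_dir is a placeholder path to a directory containing a template.yaml:

# Hypothetical usage; template_dir is a placeholder.
hyper_parameters, model_template = setup_configurable_parameters(template_dir, max_num_epochs=2)
assert hyper_parameters.learning_parameters.max_num_epochs == 2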
Example #7
def main():
    """
    Main function that is used for model demonstration.
    """

    # Dynamically create an argument parser based on override parameters.
    args, template, hyper_parameters = parse_args()
    # Get new values from user's input.
    updated_hyper_parameters = gen_params_dict_from_args(args)
    # Override the default parameters with the user's values.
    override_parameters(updated_hyper_parameters, hyper_parameters)

    hyper_parameters = create(hyper_parameters)

    # Get the Task class: OpenVINO task for exported models, base task otherwise.
    if args.load_weights.endswith(".bin") or args.load_weights.endswith(
            ".xml"):
        task_class = get_impl_class(template.entrypoints.openvino)
    else:
        task_class = get_impl_class(template.entrypoints.base)

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=read_label_schema(
            os.path.join(os.path.dirname(args.load_weights),
                         "label_schema.json")),
        model_template=template,
    )

    environment.model = read_model(environment.get_model_configuration(),
                                   args.load_weights, None)

    task = task_class(task_environment=environment)

    capture = open_images_capture(args.input, args.loop)

    elapsed_times = deque(maxlen=10)
    frame_index = 0
    while True:
        frame = capture.read()
        if frame is None:
            break
        frame_index += 1

        predictions, elapsed_time = get_predictions(task, frame)
        elapsed_times.append(elapsed_time)
        elapsed_time = np.mean(elapsed_times)

        frame = draw_predictions(template.task_type, predictions, frame,
                                 args.fit_to_size)
        if args.display_perf:
            put_text_on_rect_bg(
                frame,
                f"time: {elapsed_time:.4f} sec.",
                (0, frame.shape[0] - 30),
                color=(255, 255, 255),
            )

        if args.delay >= 0:
            cv2.imshow("frame", frame)
            if cv2.waitKey(args.delay) == ESC_BUTTON:
                break
        else:
            print(f"{frame_index=}, {elapsed_time=}, {len(predictions)=}")
Example #8
def main():
    """
    Main function that is used for model evaluation.
    """

    # Dynamically create an argument parser based on override parameters.
    args, template, hyper_parameters = parse_args()
    # Get new values from user's input.
    updated_hyper_parameters = gen_params_dict_from_args(args)
    # Override the default parameters with the user's values.
    override_parameters(updated_hyper_parameters, hyper_parameters)

    hyper_parameters = create(hyper_parameters)

    # Get classes for Task, ConfigurableParameters and Dataset.
    if args.load_weights.endswith(".bin") or args.load_weights.endswith(".xml"):
        task_class = get_impl_class(template.entrypoints.openvino)
    else:
        task_class = get_impl_class(template.entrypoints.base)

    dataset_class = get_dataset_class(template.task_type)

    dataset = dataset_class(
        test_subset={"ann_file": args.test_ann_files, "data_root": args.test_data_roots}
    )

    dataset_label_schema = generate_label_schema(dataset, template.task_type)
    check_label_schemas(
        read_label_schema(
            os.path.join(os.path.dirname(args.load_weights), "label_schema.json")
        ),
        dataset_label_schema,
    )

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=dataset_label_schema,
        model_template=template,
    )

    model = read_model(environment.get_model_configuration(), args.load_weights, None)
    environment.model = model

    task = task_class(task_environment=environment)

    validation_dataset = dataset.get_subset(Subset.TESTING)
    predicted_validation_dataset = task.infer(
        validation_dataset.with_empty_annotations(),
        InferenceParameters(is_evaluation=True),
    )

    resultset = ResultSetEntity(
        model=model,
        ground_truth_dataset=validation_dataset,
        prediction_dataset=predicted_validation_dataset,
    )
    task.evaluate(resultset)
    assert resultset.performance is not None
    print(resultset.performance)

    if args.save_performance:
        with open(args.save_performance, "w", encoding="UTF-8") as write_file:
            json.dump(
                {resultset.performance.score.name: resultset.performance.score.value},
                write_file,
            )
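
As a sanity check, the JSON written above (when args.save_performance is set) can be read back; the file name below is a placeholder.

# Hypothetical read-back of the performance file written above.
with open("performance.json", "r", encoding="UTF-8") as read_file:
    performance = json.load(read_file)
print(performance)   # e.g. {"Accuracy": 0.93}; the key is the metric name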
Example #9
def main():
    """
    Main function that is used for model training.
    """

    # Dynamically create an argument parser based on override parameters.
    args, template, hyper_parameters = parse_args()
    # Get new values from user's input.
    updated_hyper_parameters = gen_params_dict_from_args(args)
    # Override the default parameters with the user's values.
    override_parameters(updated_hyper_parameters, hyper_parameters)

    hyper_parameters = create(hyper_parameters)

    # Get classes for Task, ConfigurableParameters and Dataset.
    task_class = get_impl_class(template.entrypoints.base)
    dataset_class = get_dataset_class(template.task_type)

    # Create instances of Task, ConfigurableParameters and Dataset.
    dataset = dataset_class(
        train_subset={
            "ann_file": args.train_ann_files,
            "data_root": args.train_data_roots,
        },
        val_subset={"ann_file": args.val_ann_files, "data_root": args.val_data_roots},
    )

    environment = TaskEnvironment(
        model=None,
        hyper_parameters=hyper_parameters,
        label_schema=generate_label_schema(dataset, template.task_type),
        model_template=template,
    )

    if args.load_weights:
        environment.model = ModelEntity(
            train_dataset=dataset,
            configuration=environment.get_model_configuration(),
            model_adapters={
                "weights.pth": ModelAdapter(read_binary(args.load_weights))
            },
        )

    if args.enable_hpo:
        run_hpo(args, environment, dataset, template.task_type)

    task = task_class(task_environment=environment)

    output_model = ModelEntity(
        dataset,
        environment.get_model_configuration(),
        model_status=ModelStatus.NOT_READY,
    )

    task.train(dataset, output_model, train_parameters=TrainParameters())

    save_model_data(output_model, args.save_model_to)

    validation_dataset = dataset.get_subset(Subset.VALIDATION)
    predicted_validation_dataset = task.infer(
        validation_dataset.with_empty_annotations(),
        InferenceParameters(is_evaluation=True),
    )

    resultset = ResultSetEntity(
        model=output_model,
        ground_truth_dataset=validation_dataset,
        prediction_dataset=predicted_validation_dataset,
    )
    task.evaluate(resultset)
    assert resultset.performance is not None
    print(resultset.performance)
Example #10
def run_hpo_trainer(
    hp_config,
    model,
    hyper_parameters,
    model_template,
    dataset_paths,
    task_type,
):
    """Run each training of each trial with given hyper parameters"""

    if isinstance(hyper_parameters, dict):
        current_params = {}
        for val in hyper_parameters["parameters"]:
            current_params[val] = hyper_parameters[val]
        hyper_parameters = create(model_template.hyper_parameters.data)
        HpoManager.set_hyperparameter(hyper_parameters, current_params)

    if dataset_paths is None:
        raise ValueError("Dataset is not defined.")

    impl_class = get_dataset_class(task_type)
    dataset = impl_class(
        train_subset={
            "ann_file": dataset_paths.get("train_ann_file", None),
            "data_root": dataset_paths.get("train_data_root", None),
        },
        val_subset={
            "ann_file": dataset_paths.get("val_ann_file", None),
            "data_root": dataset_paths.get("val_data_root", None),
        },
    )

    train_env = TaskEnvironment(
        model=model,
        hyper_parameters=hyper_parameters,
        label_schema=generate_label_schema(dataset, task_type),
        model_template=model_template,
    )

    hyper_parameters = train_env.get_hyper_parameters()

    # set epoch
    if task_type == TaskType.CLASSIFICATION:
        hyper_parameters.learning_parameters.max_num_epochs = hp_config["iterations"]
    elif task_type in (TaskType.DETECTION, TaskType.SEGMENTATION):
        hyper_parameters.learning_parameters.num_iters = hp_config["iterations"]

    # set hyper-parameters and print them
    HpoManager.set_hyperparameter(hyper_parameters, hp_config["params"])
    print(f"hyper parameter of current trial : {hp_config['params']}")

    train_env.set_hyper_parameters(hyper_parameters)
    train_env.model_template.hpo = {
        "hp_config": hp_config,
        "metric": hp_config["metric"],
    }

    impl_class = get_impl_class(train_env.model_template.entrypoints.base)
    task = impl_class(task_environment=train_env)

    dataset = HpoDataset(dataset, hp_config)
    if train_env.model:
        train_env.model.train_dataset = dataset
        train_env.model.configuration.configurable_parameters = hyper_parameters

    output_model = ModelEntity(
        dataset,
        train_env.get_model_configuration(),
        model_status=ModelStatus.NOT_READY,
    )

    # make callback to report score to hpopt every epoch
    train_param = TrainParameters(
        False, HpoCallback(hp_config, hp_config["metric"], task), None)
    train_param.train_on_empty_model = None

    task.train(dataset=dataset,
               output_model=output_model,
               train_parameters=train_param)

    hpopt.finalize_trial(hp_config)
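
For reference, an illustrative hp_config dictionary with the keys the trainer above reads; the exact schema comes from hpopt, and the key format inside "params" is an assumption.

# Illustrative only: these are the keys run_hpo_trainer accesses above.
hp_config = {
    "iterations": 10,                                        # caps max_num_epochs / num_iters for the trial
    "params": {"learning_parameters.learning_rate": 0.01},   # hyper-parameters for this trial
    "metric": "mAP",                                         # score name reported back to hpopt
}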