def save(self):
    """Serialize all builder assets under ``export_dir`` and write the manifest.

    Each builder persists its artifacts into its own sub-directory beneath
    ``export_dir/asset``; the collected manifest is written to
    ``export_dir/metadata.json`` and also returned.
    """
    dependency = self.gather_dependency()

    asset_root = self.export_dir / "asset"

    export_data = {
        "version": "1.0",
        "dependency": dependency,
        "processor": self.processor_builder.serialize(
            create_dir_if_needed(asset_root / "processor")
        ),
        "model": self.model_builder.serialize(
            create_dir_if_needed(asset_root / "model")
        ),
        "metadata": self.metadata_builder.serialize(
            create_dir_if_needed(asset_root / "metadata")
        ),
    }

    # The remote model builder is optional; serialize it only when present.
    if self.remote_model_builder:
        export_data["remote_model"] = self.remote_model_builder.serialize(
            create_dir_if_needed(asset_root / "remote_model")
        )

    # Persist the manifest next to the serialized assets.
    with (self.export_dir / "metadata.json").open("wt") as fd:
        json.dump(export_data, fd)

    return export_data
# Beispiel #2 — scraped example separator (vote count: 0)
def main():
    """Train and evaluate a model, then export it as a deliverable model."""
    # User-supplied configuration is layered over the model's defaults.
    raw_config = read_configure()
    model = Model(raw_config)

    config = model.get_default_config()
    config.update(raw_config)

    # Prepare the corpus and obtain generator functions for both splits.
    corpus = get_corpus_processor(config)
    corpus.prepare()
    train_generator = corpus.get_generator_func(corpus.TRAIN)
    eval_generator = corpus.get_generator_func(corpus.EVAL)

    corpus_meta_data = corpus.get_meta_info()
    config["tags_data"] = generate_tagset(corpus_meta_data["tags"])

    # Build input functions; evaluation input is optional.
    train_input_func = build_input_func(train_generator, config)
    eval_input_func = None
    if eval_generator:
        eval_input_func = build_input_func(eval_generator, config)

    _evaluate_result, _export_results, final_saved_model = (
        model.train_and_eval_then_save(
            train_input_func, eval_input_func, config
        )
    )

    # Bundle the SavedModel and converters into a deliverable model package.
    export_as_deliverable_model(
        create_dir_if_needed(config["deliverable_model_dir"]),
        tensorflow_saved_model=final_saved_model,
        converter_for_request=converter_for_request,
        converter_for_response=converter_for_response,
        addition_model_dependency=["micro_toolkit"],
    )
# Beispiel #3 — scraped example separator (vote count: 0)
def main():
    """Train a model, log params and the SavedModel to MLflow, then export it."""
    raw_config = read_configure()
    model = Model(raw_config)

    # Default configuration, overridden by user-supplied settings.
    config = model.get_default_config()
    config.update(raw_config)

    # Record the main hyperparameters with MLflow.
    for mlflow_name, config_key in (
        ("Batch_Size", "batch_size"),
        ("Learning_Rate", "learning_rate"),
        ("Epochs", "epochs"),
        ("Embedding_Dim", "embedding_dim"),
    ):
        mlflow.log_param(mlflow_name, config[config_key])

    # Prepare the corpus and obtain generator functions for both splits.
    corpus = get_corpus_processor(config)
    corpus.prepare()
    train_generator = corpus.get_generator_func(corpus.TRAIN)
    eval_generator = corpus.get_generator_func(corpus.EVAL)

    corpus_meta_data = corpus.get_meta_info()
    config["tags_data"] = generate_tagset(corpus_meta_data["tags"])

    # Build input functions; evaluation input is optional.
    train_input_func = build_input_func(train_generator, config)
    eval_input_func = None
    if eval_generator:
        eval_input_func = build_input_func(eval_generator, config)

    _evaluate_result, _export_results, final_saved_model = (
        model.train_and_eval_then_save(
            train_input_func, eval_input_func, config
        )
    )

    # NOTE(review): evaluation metrics are currently not logged to MLflow.

    # Log the exported SavedModel as an MLflow artifact.
    mlflow.tensorflow.log_model(
        tf_saved_model_dir=final_saved_model,
        tf_meta_graph_tags=[tag_constants.SERVING],
        tf_signature_def_key='serving_default',
        artifact_path='model',
    )

    # Bundle the SavedModel and converters into a deliverable model package.
    export_as_deliverable_model(
        create_dir_if_needed(config["deliverable_model_dir"]),
        tensorflow_saved_model=final_saved_model,
        converter_for_request=ConverterForRequest(),
        converter_for_response=ConverterForResponse(),
        addition_model_dependency=["micro_toolkit"],
    )
# Beispiel #4 — scraped example separator (vote count: 0)
    def serialize(self, asset_dir: Path):
        """Serialize every registered processor instance under *asset_dir*.

        Each instance writes its assets into a per-instance sub-directory.
        Returns a dict with the manifest version, per-instance class name
        (fully qualified) and configuration, and the pre/post pipelines.
        """
        instance = {}
        for name, processor in self.processor_instance_registry.items():
            # Give each processor its own directory to serialize into.
            processor.serialize(create_dir_if_needed(asset_dir / name))

            instance[name] = {
                "class": get_class_fqn_name(processor),
                "parameter": processor.get_config(),
            }

        return {
            "version": "1.0",
            "instance": instance,
            "pipeline": {
                "pre": self.preprocess_pipeline,
                "post": self.postprocess_pipeline,
            },
        }