Example #1
def get_model_checkpoint_to_export(
        model_definition: ModelDefinition) -> tf.keras.Model:

    checkpoints_dir = model_definition.data_definition.get_data_dir_path(
        ModelDataDefinition.CHECKPOINTS_DIR)
    if model_definition.data_definition.export_checkpoint <= 0:
        # Get latest trained model
        print("Exporting latest trained epoch checkpoint")
        export_cp_path = tf.train.latest_checkpoint(checkpoints_dir)
        if export_cp_path is None:
            print("No checkpoint found at " + checkpoints_dir +
                  ": Nothing exported")
            exit()
    else:
        # Export from specific checkpoint
        export_cp_path = checkpoints_dir + "/checkpoint-{0:04d}.ckpt".format(
            model_definition.data_definition.export_checkpoint)
        print("Export specific checkpoint", export_cp_path)

    print("Loading checkpoint " + export_cp_path)
    model = model_definition.create_model_function(
        model_definition.data_definition)
    model.load_weights(export_cp_path)

    # TODO: Fails with GPT model (missing signature?)
    # TODO: It seems because input shapes are not specified. Try model.get_concrete_function (https://github.com/tensorflow/tensorflow/issues/40344)
    # See https://stackoverflow.com/questions/51806852/cant-save-custom-subclassed-model
    # From previous, see: https://colab.research.google.com/drive/172D4jishSgE3N7AO6U2OKAA_0wNnrMOq#scrollTo=4Onp-8rGyeQG
    # The only way I have found to build a subclassed Keras model is to run a prediction, so that is what we do here:
    print("Building model...")
    all_data = DataDirectory.read_all(model_definition.data_definition)
    ds = model_definition.dataset_class(all_data,
                                        model_definition.data_definition,
                                        shuffle=False,
                                        debug_columns=False)
    build_model_ds = ds.dataset.batch(1).take(1)
    for inputs, _ in build_model_ds:
        model(inputs)
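
    # A possible alternative to the prediction trick above (see the TODOs):
    # trace the model call with an explicit input signature, so variables are
    # built without real data, along the lines of
    # https://github.com/tensorflow/tensorflow/issues/40344. Sketch only; the
    # spec dict below is hypothetical and must match the real model inputs:
    #
    #   serve = tf.function(model.__call__, input_signature=[
    #       {"tokens": tf.TensorSpec([None, None], tf.int32)}])
    #   concrete_fn = serve.get_concrete_function()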

    return model
Example #2
    def __init__(self,
                 model_definition: ModelDefinition,
                 model: tf.keras.Model = None):
        self.model_definition = model_definition

        if model is not None:
            # Create the TF prediction module
            self.prediction_module = model_definition.predictor_class(
                self.model_definition.data_definition, model)
        else:
            # Load it from the export directory
            exported_model_dir = self.model_definition.data_definition.get_data_dir_path(
                ModelDataDefinition.EXPORTED_MODEL_DIR)
            print("Loading prediction module from " + exported_model_dir)
            self.prediction_module = tf.saved_model.load(exported_model_dir)
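
# Usage sketch, assuming this __init__ belongs to the Predictor class used in
# the later examples (its class statement is not part of this fragment):
#
#   predictor = Predictor(model_definition)          # loads the exported model
#   predictor = Predictor(model_definition, model)   # wraps an in-memory model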
Example #3
if __name__ == "__main__":

    # Read model definition
    model_definition = ModelDefinition()

    model = get_model_checkpoint_to_export(model_definition)

    # Save the TF prediction module with input preprocessing
    print("Saving model...")
    exported_model_dir = model_definition.data_definition.get_data_dir_path(
        ModelDataDefinition.EXPORTED_MODEL_DIR)
    prediction_module = model_definition.predictor_class(
        model_definition.data_definition, model)
    tf.saved_model.save(prediction_module, exported_model_dir)
    print("Model, with preprocessing, exported to " + exported_model_dir)
Example #4
from model_definition import ModelDefinition
from tflite.predictor_lite import PredictorLite
from predict.predictor import Predictor
from debug.debug_predictor import pretty_prediction
from time import time
import random
import numpy as np

model_definition = ModelDefinition()

# TF Lite prediction
predictor_lite = PredictorLite(model_definition)
empty_element = predictor_lite.get_empty_element()

# Full TF prediction
predictor = Predictor(model_definition)


def test_lite_performance():
    # Test performance
    n_repetitions = 1000
    print("Testing performance, n. repetitions:", n_repetitions)
    start = time()
    for _ in range(n_repetitions):
        predictor_lite.predict(empty_element)
    end = time()
    print("Total time:", end - start, "s")
    print("Prediction performance:", ((end - start) / n_repetitions) * 1000,
          "ms")

Example #5
import tensorflow as tf
from model_definition import ModelDefinition
from model_data_definition import ModelDataDefinition
from export import get_model_checkpoint_to_export
from predict.gpt_predictor import GptPredictorLite

model_definition = ModelDefinition()
model = get_model_checkpoint_to_export(model_definition)

data_definition = model_definition.data_definition
exported_model_dir = data_definition.get_data_dir_path(
    ModelDataDefinition.EXPORTED_MODEL_DIR)

# Module to run predictions with TF Lite
predict_module = model_definition.tflite_predictor_class(
    data_definition, model)

# Convert the predict function, with preprocessing
converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [predict_module.predict_tflite_function.get_concrete_function()])

converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
    tf.lite.OpsSet.SELECT_TF_OPS  # enable TensorFlow ops.
]
tflite_model = converter.convert()

# Save the model.
path = data_definition.get_data_dir_path(ModelDataDefinition.TFLITE_PATH)
with open(path, 'wb') as f:
    f.write(tflite_model)
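
# Smoke test (sketch): the converted model can be inspected and run with the
# TF Lite interpreter. Since SELECT_TF_OPS is enabled above, running it needs
# the Flex delegate, which the full TensorFlow pip package provides.
interpreter = tf.lite.Interpreter(model_path=path)
interpreter.allocate_tensors()
print("TF Lite inputs:", interpreter.get_input_details())
print("TF Lite outputs:", interpreter.get_output_details())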
Example #6
from model_definition import ModelDefinition

model_definition = ModelDefinition()
trainer = model_definition.trainer_class()
trainer.train()

# Get a prediction now, to check that predictions stay the same after export
# from predict.predictor import Predictor
# predictor = Predictor(model_definition, trainer.model)
# print( "Prediction:" , predictor.predict( predictor.get_empty_element() ) )
Example #7

from time import time
from model_definition import ModelDefinition
from data_directory import DataDirectory

# Batch size for the performance test (the value is not shown in this
# fragment; 64 is an assumption)
BATCH_SIZE = 64


def performance(start, n_batches):
    # Reconstructed helper (only its trailing print() survived in the
    # fragment): report throughput for the batches read so far
    elapsed = time() - start
    if elapsed > 0:
        print("Batches read:", n_batches, "- batches/s:", n_batches / elapsed)
        print()


if __name__ == '__main__':

    # Read data definition
    model_definition = ModelDefinition()

    # Read all CSV paths
    all_data = DataDirectory.read_all(model_definition.data_definition)

    # True -> Test dataset performance, False -> Print ds values
    TEST_PERFORMANCE = True

    # Create dataset for this model type
    print("Dataset type:", model_definition.dataset_class)
    ds = model_definition.dataset_class(all_data,
                                        model_definition.data_definition,
                                        shuffle=TEST_PERFORMANCE,
                                        debug_columns=not TEST_PERFORMANCE)

    # Test entire eval dataset
    n_batches = 0
    start = time()
    for _ in ds.dataset.batch(BATCH_SIZE):
        n_batches += 1
        if n_batches % 50 == 0:
            performance(start, n_batches)

    performance(start, n_batches)
Example #8
import configure_tf_log # Must be FIRST import
from model_definition import ModelDefinition
from predict.predictor import Predictor
from debug_ds import pretty
from time import time
import json
import tensorflow as tf
from data_directory import DataDirectory
from debug.debug_predictor import pretty_prediction

model_definition = ModelDefinition()
data_definition = model_definition.data_definition
predictor = Predictor(model_definition)

data_dir = DataDirectory(["data/PAlcCanFac.csv"])
ds = model_definition.dataset_class(data_dir,
                                    data_definition,
                                    shuffle=False,
                                    debug_columns=False)

ds.dataset = ds.dataset.batch(1).take(1)
#ds.dataset = ds.dataset.take(3)

# TODO: I think this was done to debug the GPT model only. Make it work with the RNN model too?

for row in ds.dataset:
    model_input = row[0]
    expected_output = row[1]

    #print(model_input)
    batched_logits = predictor.model(model_input)
    #print(batched_logits)
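
    # The fragment ends here. A hypothetical continuation (pretty_prediction
    # is imported above, but its exact signature is not shown in this file)
    # would pretty-print the logits against expected_output, e.g.:
    #   pretty_prediction(batched_logits, data_definition)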