def load_vectorized_model(vt_pkl, model_pth):
    """Rebuild an end-to-end text classifier from saved artifacts.

    Restores a pickled ``TextVectorization`` layer (config + weights) and a
    saved Keras model, then stitches them together with a sigmoid activation
    so the returned model can be called directly on raw string data.

    Args:
        vt_pkl: Path to a pickle file holding a dict with keys ``'config'``
            (TextVectorization config) and ``'weights'`` (its weights).
        model_pth: Path to a saved Keras model loadable by
            ``tf.keras.models.load_model``.

    Returns:
        A compiled ``tf.keras.Sequential`` model accepting raw text input.

    NOTE(review): ``pickle.load`` is unsafe on untrusted files — only load
    artifacts produced by this project's own training pipeline.
    """
    with open(vt_pkl, 'rb') as r:
        config_file = pickle.load(r)
        vt_layer = TextVectorization.from_config(config_file['config'])
        # You have to call `adapt` with some dummy data before set_weights
        # (known Keras quirk: the layer's state must be built first).
        vt_layer.adapt(tf.data.Dataset.from_tensor_slices(["xyz"]))
        vt_layer.set_weights(config_file['weights'])

    new_model = tf.keras.models.load_model(model_pth)

    # can be used on raw_oot_data
    export_model = tf.keras.Sequential(
        [vt_layer, new_model,
         layers.Activation('sigmoid')])
    # from_logits=False because the sigmoid above already maps to probabilities.
    export_model.compile(loss=losses.BinaryCrossentropy(from_logits=False),
                         optimizer="adam",
                         metrics=['accuracy'])
    # BUG FIX: summary() prints its own output and returns None; wrapping it
    # in print() emitted a spurious "None" line.
    export_model.summary()
    return export_model
# --- Beispiel #2 (scraper artifact: snippet separator; original "def" header
# --- and score line of the following function were lost in extraction) ---
    out_dir.mkdir(parents=True, exist_ok=True)

    with open(mod_dir / const.CFG_OUTPUT_FILE, "r") as file:
        cfg = yaml.safe_load(file)

    # Load pre-trained model
    model = tensorflow.keras.models.load_model(mod_dir)
    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    # Load embedding model
    vl_path = mod_dir / const.VLAYER_OUTPUT_FILE
    vl = pickle.load(open(vl_path, "rb"))
    vlayer = TextVectorization.from_config(vl["config"])
    vlayer.set_weights(vl["weights"])
    # [BUG KERAS] You have to call `adapt` with some dummy data
    # new_v.adapt(tensorflow.data.Dataset.from_tensor_slices(["xyz"]))

    # A string input
    inputs = tensorflow.keras.Input(shape=(1, ), dtype="string")
    # Turn strings into vocab indices
    indices = vlayer(inputs)
    # Turn vocab indices into predictions
    outputs = model(indices)

    # Our end to end model
    end_to_end_model = tensorflow.keras.Model(inputs, outputs)
    end_to_end_model.compile(loss="binary_crossentropy",
                             optimizer="adam",