# Sample next-character indices from the model's output logits for one example
# sequence; sampling (rather than taking the argmax) keeps the output from
# getting stuck in a loop.
example_sequence_input = input_example_batch[0]
example_sequence_prediction_logits = example_batch_predictions[0]
example_sequence_prediction_indices = tf.squeeze(
    tf.random.categorical(example_sequence_prediction_logits, num_samples=1),
    axis=-1).numpy()

print("Input:\n", text_from_ids(example_sequence_input).numpy())
print("Next Char Predictions:\n",
      text_from_ids(example_sequence_prediction_indices).numpy())

loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam', loss=loss)
history = model.fit(haikus_dataset, epochs=50)

# One model is trained per haiku verse; `verse` indexes the current verse.
model.save_weights('verse{}.h5'.format(verse))
model.load_weights('verse{}.h5'.format(verse))
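Since Example #2 below reloads weights for verse0 through verse2, the training step above is presumably run once per verse. A hypothetical outer loop, using the same GRU class that Example #2 constructs (verse_datasets and the per-verse split are assumptions):

# Hypothetical: train one model per haiku verse and save its weights.
for verse in range(3):
    model = GRU(vocab_size=len(ids_from_chars.get_vocabulary()),
                embedding_dim=256,
                rnn_units=1024)
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(
                      from_logits=True))
    model.fit(verse_datasets[verse], epochs=50)  # verse_datasets: assumed
    model.save_weights('verse{}.h5'.format(verse))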
'''
one_step_model = OneStep(model, chars_from_ids, ids_from_chars, temperature=1)

states = None
next_char = tf.constant(['Cherry'])
result = [next_char]

for n in range(1000):
    next_char, states = one_step_model.generate_one_step(next_char, states=states)
    result.append(next_char)

result = tf.strings.join(result)
print(result[0].numpy().decode('utf-8'), '\n\n' + '_'*80)
'''
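OneStep is used here and again in Example #2 but never defined in the snippet. A minimal sketch, closely following the OneStep class from the TensorFlow text-generation tutorial this code appears to be based on (an assumption; the original definition may differ):

class OneStep(tf.keras.Model):
    def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0):
        super().__init__()
        self.temperature = temperature
        self.model = model
        self.chars_from_ids = chars_from_ids
        self.ids_from_chars = ids_from_chars

        # Mask the "[UNK]" token so it is never sampled.
        skip_ids = self.ids_from_chars(['[UNK]'])[:, None]
        sparse_mask = tf.SparseTensor(
            values=[-float('inf')] * len(skip_ids),
            indices=skip_ids,
            dense_shape=[len(ids_from_chars.get_vocabulary())])
        self.prediction_mask = tf.sparse.to_dense(sparse_mask)

    @tf.function
    def generate_one_step(self, inputs, states=None):
        input_chars = tf.strings.unicode_split(inputs, 'UTF-8')
        input_ids = self.ids_from_chars(input_chars).to_tensor()

        predicted_logits, states = self.model(inputs=input_ids, states=states,
                                              return_state=True)
        # Keep only the last time step, then apply temperature and the mask.
        predicted_logits = predicted_logits[:, -1, :] / self.temperature
        predicted_logits = predicted_logits + self.prediction_mask

        predicted_ids = tf.squeeze(
            tf.random.categorical(predicted_logits, num_samples=1), axis=-1)
        return self.chars_from_ids(predicted_ids), states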
Example #2
import pickle

import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing

# `vocab` is assumed to be the set of unique characters in the corpus.
ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab),
                                            mask_token=None)
chars_from_ids = preprocessing.StringLookup(
    vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None)


def text_from_ids(ids):
    # Map ids back to characters and join each sequence into one string.
    return tf.strings.reduce_join(chars_from_ids(ids), axis=-1)
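A quick round-trip through the two lookup layers (illustrative only; the actual ids depend on the vocabulary):

sample_chars = tf.strings.unicode_split(['haiku'], input_encoding='UTF-8')
sample_ids = ids_from_chars(sample_chars)
print(text_from_ids(sample_ids).numpy())  # -> [b'haiku']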


# A saved input batch, used only to build each model's variables with a
# forward pass before loading the trained weights.
with open('example_batch', 'rb') as f:
    example_batch = pickle.load(f)
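The GRU model class is likewise not defined in the snippet. A minimal sketch consistent with how it is constructed below and with the tutorial's MyModel (an assumption, not the original class):

class GRU(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, rnn_units):
        super().__init__()
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(rnn_units,
                                       return_sequences=True,
                                       return_state=True)
        self.dense = tf.keras.layers.Dense(vocab_size)

    def call(self, inputs, states=None, return_state=False, training=False):
        x = self.embedding(inputs, training=training)
        if states is None:
            states = self.gru.get_initial_state(x)
        x, states = self.gru(x, initial_state=states, training=training)
        x = self.dense(x, training=training)
        return (x, states) if return_state else x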
# Rebuild the architecture, run one forward pass to create the variables,
# then load the weights trained for the first verse (saved as 'verse0.h5').
verse1_model = GRU(vocab_size=len(ids_from_chars.get_vocabulary()),
                   embedding_dim=256,
                   rnn_units=1024)
verse1_model(example_batch)
verse1_model.load_weights('verse0.h5')
verse1_model_oneStep = OneStep(verse1_model,
                               chars_from_ids,
                               ids_from_chars,
                               temperature=1)
verse2_model = GRU(vocab_size=len(ids_from_chars.get_vocabulary()),
                   embedding_dim=256,
                   rnn_units=1024)
verse2_model(example_batch)
verse2_model.load_weights('verse1.h5')
verse2_model_oneStep = OneStep(verse2_model,
                               chars_from_ids,
                               ids_from_chars,
                               temperature=1)
verse3_model = GRU(vocab_size=len(ids_from_chars.get_vocabulary()),
                   embedding_dim=256,