def evaluate_model(model):
    # Train for 50 epochs on the (globally defined) training set and plot
    # the accuracy/loss curves; (x_valid, y_valid) supplies the validation metrics.
    history = model.fit(x_train,
                        y_train,
                        epochs=50,
                        batch_size=256,
                        validation_data=(x_valid, y_valid))
    plot_acc_loss(history)
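
The plot_acc_loss helper called throughout these examples is never defined in the snippets. A minimal sketch of what it presumably does, assuming matplotlib and a Keras History object whose metrics were recorded under 'acc'/'val_acc'; the epochs parameter and val flag are inferred from the call sites:

import matplotlib.pyplot as plt

def plot_acc_loss(history, epochs=None, val=True):
    # Plot training (and optionally validation) accuracy and loss curves.
    acc, loss = history.history['acc'], history.history['loss']
    x = range(1, (epochs or len(acc)) + 1)
    plt.figure(figsize=(10, 4))
    plt.subplot(1, 2, 1)
    plt.plot(x, acc, label='training acc')
    if val:
        plt.plot(x, history.history['val_acc'], label='validation acc')
    plt.title('Accuracy')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(x, loss, label='training loss')
    if val:
        plt.plot(x, history.history['val_loss'], label='validation loss')
    plt.title('Loss')
    plt.legend()
    plt.show()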
Example no. 2
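This snippet assumes the IMDB data and a couple of hyperparameters are already in scope. A minimal setup sketch; the max_features and maxlen values below are assumptions, not taken from the original:

from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences

max_features = 10000  # vocabulary size (assumed value)
maxlen = 500          # cut reviews after this many words (assumed value)

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)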
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import RMSprop

def try_conv1d_imdb():
    # Two stacked Conv1D blocks over word embeddings, collapsed to a single
    # feature vector with global max pooling.
    model = Sequential()
    model.add(layers.Embedding(max_features, 128, input_length=maxlen))
    model.add(layers.Conv1D(32, 7, activation='relu'))
    model.add(layers.MaxPooling1D(5))
    model.add(layers.Conv1D(32, 7, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(1, activation='sigmoid'))  # sigmoid needed for binary_crossentropy

    model.summary()

    model.compile(optimizer=RMSprop(learning_rate=1e-4),  # lr is deprecated
                  loss='binary_crossentropy',
                  metrics=['acc'])
    history = model.fit(x_train,
                        y_train,
                        epochs=8,
                        batch_size=128,
                        validation_split=.2)
    plot_acc_loss(history, 8)

    model.evaluate(x_test, y_test)  # .85 acc, .43 loss
Example no. 3
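The snippet assumes a Tokenizer already fitted on training_sentences plus several hyperparameters. A sketch of the assumed setup; every value below is a placeholder, not taken from the original:

import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

vocab_size = 10000     # assumed value
embedding_dim = 16     # assumed value
max_length = 120       # assumed value
trunc_type = 'post'    # assumed value
padding_type = 'post'  # assumed value
num_epochs = 10        # assumed value

tokenizer = Tokenizer(num_words=vocab_size, oov_token='<OOV>')
tokenizer.fit_on_texts(training_sentences)
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length,
                                padding=padding_type, truncating=trunc_type)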
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)

training_padded = np.array(training_padded)
training_labels = np.array(training_labels)
testing_padded = np.array(testing_padded)
testing_labels = np.array(testing_labels)

""" MODEL TUNING """

model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64)),
    # tf.keras.layers.Conv1D(128, 5, activation='relu'),
    # tf.keras.layers.GlobalMaxPooling1D(),
    # tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
    # tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(16)),
    tf.keras.layers.Dense(24, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
# Note: the test split doubles as the validation set here.
history = model.fit(training_padded, training_labels,
                    epochs=num_epochs, batch_size=128,
                    validation_data=(testing_padded, testing_labels))

plot_acc_loss(history)
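
Once trained, the model can score unseen sentences through the same tokenizer/padding pipeline; the sample text below is a made-up example:

sample = ["granny starting to fear spiders in the garden might be real"]
seq = tokenizer.texts_to_sequences(sample)
padded = pad_sequences(seq, maxlen=max_length,
                       padding=padding_type, truncating=trunc_type)
print(model.predict(padded))  # probability of the positive class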

Example no. 4
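xs, labels, total_words, max_sequence_len and the tokenizer are not defined in the snippet. A sketch of the usual n-gram preprocessing for this kind of next-word model, assuming corpus is a list of text lines:

import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)  # corpus: list of text lines (assumed to exist)
total_words = len(tokenizer.word_index) + 1

# Build every n-gram prefix of every line as a training sequence.
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        input_sequences.append(token_list[:i + 1])

max_sequence_len = max(len(s) for s in input_sequences)
input_sequences = np.array(pad_sequences(input_sequences,
                                         maxlen=max_sequence_len,
                                         padding='pre'))
# Inputs are all tokens but the last; the label is the final token.
xs, labels = input_sequences[:, :-1], input_sequences[:, -1]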
# One-hot encode the target word indices for the softmax output.
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)

model = tf.keras.models.Sequential()
model.add(
    tf.keras.layers.Embedding(total_words,
                              100,
                              input_length=max_sequence_len - 1))
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(150)))
model.add(tf.keras.layers.Dense(total_words, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              metrics=['acc'],
              optimizer='adam')
history = model.fit(xs, ys, epochs=100, verbose=1)

plot_acc_loss(history, val=False)

seed_text = 'Laurence went to dublin'
next_words = 100

for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list],
                               maxlen=max_sequence_len - 1,
                               padding='pre')
    # predict_classes() was removed in TF 2.6; take the argmax instead.
    predicted = np.argmax(model.predict(token_list), axis=-1)[0]
    output_word = ""
    for word, index in tokenizer.word_index.items():
        if index == predicted:
            output_word = word
            break
    # Append the prediction so the next iteration sees the longer context.
    seed_text += " " + output_word

print(seed_text)