            model.compile(loss="binary_crossentropy",
                          optimizer="adam",
                          metrics=['accuracy'])
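
            # Assumed setup, not shown in this excerpt: the NAME, EPOCHS and
            # `tensorboard` identifiers used below would be defined roughly as
            # follows (the names and values here are illustrative, not the
            # author's originals).
            import time
            from tensorflow.keras.callbacks import TensorBoard

            NAME = f"rnn-{int(time.time())}"                   # hypothetical run name
            EPOCHS = 10                                        # hypothetical epoch count
            tensorboard = TensorBoard(log_dir=f"logs/{NAME}")  # TensorBoard log directory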

            # Batch size: ideally 20-200, scaled with the size of the dataset
            model.fit(x_train,
                      y_train,
                      batch_size=4,
                      epochs=EPOCHS,
                      validation_split=0.2,
                      callbacks=[tensorboard])

            ##############
            # VALIDATION #
            ##############
            val_loss, val_acc = model.evaluate(x_valid, y_valid)
            print(val_loss, val_acc)

            ###########
            # CLEANUP #
            ###########
            model.save(NAME)
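
            # Illustrative follow-up, not part of the original example: the saved
            # model can be reloaded later for further evaluation or inference.
            from tensorflow.keras.models import load_model
            restored = load_model(NAME)
            print(restored.evaluate(x_valid, y_valid))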

Example #2

# Helper whose header is missing from this excerpt; the function name and its
# argument are assumed here for illustration.
def words_to_text(words):
    # Concatenate all words into a single space-separated string.
    text = " ".join(words)

    return text
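
# Assumed preprocessing, not shown in this snippet: the `num_words`, `max_tokens`,
# `x_train_pad` and `x_test_pad` values used below are typically produced with the
# Keras tokenizer along these lines. The raw texts in x_train/x_test and the two
# numeric values are illustrative assumptions, not the original code.
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

num_words = 10000                          # vocabulary size kept by the tokenizer
tokenizer = Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(x_train)            # x_train: list of raw text strings

x_train_tokens = tokenizer.texts_to_sequences(x_train)
x_test_tokens = tokenizer.texts_to_sequences(x_test)

max_tokens = 100                           # fixed sequence length after padding/truncation
x_train_pad = pad_sequences(x_train_tokens, maxlen=max_tokens)
x_test_pad = pad_sequences(x_test_tokens, maxlen=max_tokens)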


# Imports required by this example.
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, GRU, Dense
from tensorflow.keras.optimizers import Adam

# Create the RNN: an embedding layer, three stacked GRU layers, sigmoid output.
model = Sequential()
embedding_size = 8
model.add(
    Embedding(input_dim=num_words,
              output_dim=embedding_size,
              input_length=max_tokens,
              name='layer_embedding'))
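
# Three stacked GRU layers; return_sequences=True makes a layer emit its full
# output sequence so the next GRU receives one vector per timestep.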
model.add(GRU(units=16, return_sequences=True))
model.add(GRU(units=8, return_sequences=True))
model.add(GRU(units=4))
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(learning_rate=1e-3)  # 'lr' is a deprecated alias for 'learning_rate'

model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
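
# Optional sanity check: print the layer-by-layer architecture and parameter counts.
model.summary()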

x = np.array(x_train_pad)
y = np.array(y_train)

model.fit(x, y, validation_split=0.06, epochs=3, batch_size=64)
result = model.evaluate(x_test_pad, y_test)
print("Accuracy: {0:.2%}".format(result[1]))