# --- Example #1 ---
def train_toy_example(epochs=10):
    """Train a tiny Embedding + Dense sentiment classifier on IMDB.

    Args:
        epochs: number of training epochs (default 10, the value that
            was previously hard-coded).

    NOTE(review): relies on module-level `imdb`, `preprocessing`,
    `Sequential`, `Embedding`, `Flatten`, `Dense` and `plot_acc_loss`
    (presumably Keras imports elsewhere in this file).
    """
    max_feature = 10000  # vocabulary size kept from the original
    maxlen = 20          # every review truncated/padded to 20 tokens

    (x_train, y_train), (x_test,
                         y_test) = imdb.load_data(num_words=max_feature)
    # Pad/truncate so all samples share a fixed length of `maxlen`.
    x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)

    model = Sequential()
    # max_feature => vocab size, 8 => embedding dim, input_length => len(sample)
    # output shape: (n_samples, maxlen, 8)
    # https://keras.io/api/layers/core_layers/embedding/
    # Was a literal 10000 — now uses max_feature so the two stay in sync.
    model.add(Embedding(max_feature, 8, input_length=maxlen))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    model.summary()

    history = model.fit(x_train,
                        y_train,
                        epochs=epochs,
                        batch_size=32,
                        validation_split=.2)
    # Plot over the same epoch count used for training (was duplicated 10s).
    plot_acc_loss(history, epochs)
# --- Example #2 ---
def train_model_pretrained_embedding(model, save=False):
    """Load pre-trained (GloVe) weights into the first layer, freeze it, train.

    NOTE(review): depends on module-level `embedding_matrix`, `compile_fit`,
    `epochs` and `plot_acc_loss` — confirm they are defined elsewhere in
    this file.
    """
    embedding_layer = model.layers[0]
    embedding_layer.set_weights([embedding_matrix])
    embedding_layer.trainable = False  # keep the pre-trained vectors fixed

    model, history = compile_fit(model)
    if save:
        model.save_weights('pre_trained_glove_model.h5')
    plot_acc_loss(history, epochs)
# --- Example #3 ---
def train_plot_LSTM():
    """Train a single-layer LSTM sentiment classifier and plot its curves.

    NOTE(review): reads module-level `max_features`, `input_train`,
    `y_train`, `epochs` and `plot_acc_loss` — assumed defined elsewhere
    in this file.
    """
    model = Sequential()
    for layer in (Embedding(max_features, 32),
                  LSTM(32),
                  Dense(1, activation='sigmoid')):
        model.add(layer)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    history = model.fit(input_train,
                        y_train,
                        epochs=10,
                        batch_size=128,
                        validation_split=.2)

    plot_acc_loss(history, epochs)
# --- Example #4 ---
def try_reverse_order(epochs=10):
    """Train an LSTM on IMDB reviews with each review's tokens reversed.

    Tests whether feeding the sequence back-to-front affects accuracy.

    Args:
        epochs: number of training epochs (default 10, the value that
            was previously hard-coded in both fit() and plot_acc_loss()).

    NOTE(review): uses module-level `max_features`, `maxlen`, `imdb`,
    `preprocessing`, Keras layer classes and `plot_acc_loss` — assumed
    imported/defined elsewhere in this file.
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
    # Reverse the token order of every review.
    x_train = [x[::-1] for x in x_train]
    x_test = [x[::-1] for x in x_test]

    x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)

    model = Sequential()
    model.add(Embedding(max_features, 128))
    model.add(LSTM(32))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
    history = model.fit(x_train, y_train, epochs=epochs, batch_size=128, validation_split=.2)

    # Plot over the same epoch count used for training.
    plot_acc_loss(history, epochs)
# --- Example #5 ---
def train_model_fresh_embedding(model, save=False):
    """Compile and fit `model` with its embedding trained from scratch.

    NOTE(review): depends on module-level `compile_fit`, `epochs` and
    `plot_acc_loss` — confirm they are defined elsewhere in this file.
    """
    model, history = compile_fit(model)
    if save:
        # Persist the trained weights for later reuse.
        model.save_weights('fresh_embedding_model.h5')
    plot_acc_loss(history, epochs)