import load_data  # project-local data loading module (used as load_data.load_cook_train_data below)
import train_mlp_model  # project-local MLP training module


def tune_mlp_model():
    """Sweeps dropout rates for the MLP model and plots the resulting accuracy."""
    _, data = load_data.load_cook_train_data(isLemmatize=True)

    num_layers = [1, 2, 3]
    num_units = [8, 16, 32, 64, 128]
    dropout_rates = [0.1, 0.2, 0.3, 0.4]

    params = {
        # 'layers': [],
        # 'units': [],
        'dropout_rate': [],
        'accuracy': [],
    }

    # for layers in num_layers:
    #     for units in num_units:
    for dropout_rate in dropout_rates:
        # params['layers'].append(layers)
        # params['units'].append(units)
        params['dropout_rate'].append(dropout_rate)

        accuracy, _ = train_mlp_model.train_mlp_model(
            data,
            # units=units,
            # layers=layers,
            dropout_rate=dropout_rate)

        # print('Accuracy: {accuracy}, Parameters: (layers={layers}, '
        #       'units={units})'.format(accuracy=accuracy, units=units,
        #                               layers=layers))
        print('Accuracy: {accuracy}, Parameters: dropout_rate={dropout_rate}'
              .format(accuracy=accuracy, dropout_rate=dropout_rate))
        params['accuracy'].append(accuracy)

    _plot_parameters(params)
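# The _plot_parameters helper called above is not defined in this fragment.
# The following is a minimal, assumed sketch of what it could look like:
# plotting validation accuracy against the swept dropout rates with matplotlib.
# Names and layout are illustrative, not the project's actual implementation.
import matplotlib.pyplot as plt


def _plot_parameters(params):
    """Plots validation accuracy against the swept hyperparameter values."""
    plt.plot(params['dropout_rate'], params['accuracy'], marker='o')
    plt.xlabel('Dropout rate')
    plt.ylabel('Validation accuracy')
    plt.title('MLP hyperparameter tuning')
    plt.show()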
    # Binary vs. multi-class classification determines the loss function.
    if num_classes == 2:
        loss = 'binary_crossentropy'
    else:
        loss = 'sparse_categorical_crossentropy'
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])

    # Stop training early if validation loss fails to improve for 2 epochs.
    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2)
    ]

    history = model.fit(x_train,
                        train_labels,
                        epochs=epochs,
                        callbacks=callbacks,
                        validation_data=(x_val, val_labels),
                        verbose=2,
                        batch_size=batch_size)

    history = history.history
    print('Validation accuracy: {acc}, loss: {loss}'.format(
        acc=history['val_acc'][-1], loss=history['val_loss'][-1]))

    model.save('rotten_tomatoes_sepcnn_model.h5')
    return history['val_acc'][-1], history['val_loss'][-1]


if __name__ == '__main__':
    class_names, data = load_data.load_cook_train_data(isLemmatize=True)
    print(class_names)
    train_sequence_model(data)
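# Usage sketch (assumed, not part of the original script): the trained model is
# saved to 'rotten_tomatoes_sepcnn_model.h5' above, so it can be reloaded later
# for prediction without retraining. New inputs must be tokenized and padded
# exactly like x_train / x_val; the zero-filled x_new below only illustrates the call.
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('rotten_tomatoes_sepcnn_model.h5')

sequence_length = model.input_shape[1]  # padded length the model expects
x_new = np.zeros((1, sequence_length), dtype='int32')  # placeholder sequence

predictions = model.predict(x_new)
print('Predicted class:', predictions.argmax(axis=-1))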