batch_size=1000, shuffle=True, class_mode="binary", target_size=(64, 64)) test_generator = test_datagen.flow_from_dataframe(dataframe=df_final[1400:], directory="img_test", x_col="name", y_col="label", batch_size=1000, shuffle=True, class_mode='binary', target_size=(64, 64)) history = model.fit_generator(train_generator, steps_per_epoch=10, epochs=500, validation_data=test_generator, validation_steps=10) # Plot Training and Test plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('CNN Binary Accuracy Spectogram') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.savefig('model_performances_binary') plt.show() model.save('final_binary_model_pos_neg.h5')
('Mr', 'Miss', 'Mrs', 'Master', 'Dr', 'Rev', 'Officer', 'Royalty'), (0, 1, 2, 3, 4, 5, 6, 7), inplace=True) ################################################################### model = Sequential() model.add(Dense(32, activation='relu', input_shape=(8, ))) model.add(Dropout(rate=0.2)) model.add(Dense(64, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dropout(rate=0.2)) model.add(Dense(64, activation='relu')) model.add(Dropout(rate=0.2)) model.add(Dense(32, activation='relu')) model.add(Dropout(rate=0.2)) model.add(Dense(1, activation='sigmoid')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) x = df.drop('Survived', axis=1) y = df['Survived'] model_train = model.fit(x, y, epochs=500, batch_size=50, verbose=0, validation_split=0.06) model.save('titanic_NN.h5')
# Checkpoint callbacks: keep the single best model seen so far, once by
# highest validation accuracy and once by lowest validation loss.
best_model_val_acc = ModelCheckpoint('best_model_val_acc', monitor='val_acc',
                                     mode='max', verbose=1, save_best_only=True)
best_model_val_loss = ModelCheckpoint('best_model_val_loss', monitor='val_loss',
                                      mode='min', verbose=1, save_best_only=True)

# early stopping to prevent overfitting
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                           min_delta=0,
                                           patience=2,
                                           verbose=0,
                                           mode='auto',
                                           baseline=None,
                                           restore_best_weights=False)

# create csv with training data log
csv_logger = CSVLogger('models/' + log_name)

# BUG FIX: early_stop was constructed above but never registered, so early
# stopping silently never ran; it is now included in the callbacks list.
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=nb_epochs,
    shuffle=True,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[csv_logger, best_model_val_acc, best_model_val_loss, early_stop],
)

# Save the final model state (weights after the last completed epoch,
# independent of the best-model checkpoints saved above).
model.save('models/' + model_name)