validation_scores['loss'] = history.history['val_loss']

	# write training and validation metrics to results dir
	dict_to_hdf5(training_scores, train_results_path)
	dict_to_hdf5(validation_scores, valid_results_path)
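	# dict_to_hdf5 is assumed to write each metric series in the dict as its own
	# HDF5 dataset; a minimal sketch of that interface (using h5py, not necessarily
	# the project's actual implementation):
	#
	#   import h5py
	#   def dict_to_hdf5(scores, path):
	#       with h5py.File(path, 'w') as f:
	#           for name, values in scores.items():
	#               f.create_dataset(name, data=values)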

	# plot training vs validation and write to results dir
	plot_path = os.path.join(run_results_dir, 'train_validation_metrics.png')
	metrics_to_plot = ['loss', 'roc_auc']
	plot_train_vs_validation(metrics_to_plot, training_scores, validation_scores, plot_path)
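	# plot_train_vs_validation is assumed to overlay the per-epoch training and
	# validation curves for each requested metric; a rough matplotlib sketch:
	#
	#   import matplotlib.pyplot as plt
	#   def plot_train_vs_validation(metrics, train_scores, valid_scores, path):
	#       fig, axes = plt.subplots(1, len(metrics), figsize=(6 * len(metrics), 4), squeeze=False)
	#       for ax, metric in zip(axes[0], metrics):
	#           ax.plot(train_scores[metric], label='train')
	#           ax.plot(valid_scores[metric], label='validation')
	#           ax.set_xlabel('epoch')
	#           ax.set_ylabel(metric)
	#           ax.legend()
	#       fig.savefig(path)
	#       plt.close(fig)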

	# compute and save scores across candidate decision thresholds on the validation set
	y_prob = model.predict(valid_x)
	calculate_threshold_scores(valid_y, y_prob, run_results_dir, threshold_interval, with_graph=False)
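	# calculate_threshold_scores is assumed to sweep decision thresholds from 0 to 1
	# in steps of threshold_interval and record the scores at each threshold; an
	# illustrative sketch (output filename and metric choice are assumptions, and
	# the with_graph branch is omitted):
	#
	#   import os
	#   import numpy as np
	#   from sklearn.metrics import f1_score
	#   def calculate_threshold_scores(y_true, y_prob, out_dir, interval, with_graph=False):
	#       rows = []
	#       for threshold in np.arange(0.0, 1.0 + interval, interval):
	#           y_pred = (y_prob.ravel() >= threshold).astype(int)
	#           rows.append((threshold, f1_score(y_true, y_pred)))
	#       with open(os.path.join(out_dir, 'threshold_scores.csv'), 'w') as f:
	#           f.write('threshold,f1\n')
	#           f.writelines('%f,%f\n' % row for row in rows)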

	logger.log_time('RESULTS SAVED')
	logger.log_time('ENDING RUN ' + str(run))
	logger.write_to_file(os.path.join(run_results_dir, 'logs.txt'))

	# release per-run data before the next run
	del train_x, train_y, valid_x, valid_y, training_scores, validation_scores


logger.log_time('ALL RUNS COMPLETED')
logger.mark_dir_complete('./')

	logger.log_time('MODEL COMPILED')
	logger.log_time('BEGINNING TRAINING')

	history = model.fit(
		x=train_x,
		y=train_y,
		epochs=epochs,
		batch_size=256,
		verbose=0,
		callbacks=[epochTimer],
	)
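	# epochTimer is assumed to be a Keras callback that records wall-clock time per
	# epoch and dumps the timings via write_timings(); a minimal sketch (output path
	# is an assumption):
	#
	#   import time
	#   from tensorflow import keras  # or plain `import keras`, depending on the setup
	#   class EpochTimer(keras.callbacks.Callback):
	#       def __init__(self, out_path='epoch_timings.txt'):
	#           super().__init__()
	#           self.out_path = out_path
	#           self.timings = []
	#       def on_epoch_begin(self, epoch, logs=None):
	#           self._start = time.time()
	#       def on_epoch_end(self, epoch, logs=None):
	#           self.timings.append(time.time() - self._start)
	#       def write_timings(self):
	#           with open(self.out_path, 'w') as f:
	#               f.writelines('%f\n' % t for t in self.timings)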

	logger.log_time('TRAINING COMPLETE')

	# CALCULATE TEST SET PERFORMANCE AND WRITE TO RESULTS FILES
	# -------------------------------------------------- #
	y_prob = model.predict(test_x)
	write_performance_metrics(test_y, y_prob, theoretical_threshold, theoretical_threshold_results_file, minority_size)
	write_performance_metrics(test_y, y_prob, decision_threshold, optimal_threshold_results_file, minority_size)
	write_performance_metrics(test_y, y_prob, 0.5, default_threshold_results_file, minority_size)
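	# write_performance_metrics is assumed to binarise y_prob at the given threshold
	# and append the resulting test-set scores (plus the minority class size) to the
	# named results file; a hedged sketch with an illustrative metric set:
	#
	#   from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
	#   def write_performance_metrics(y_true, y_prob, threshold, out_file, minority_size):
	#       y_pred = (y_prob.ravel() >= threshold).astype(int)
	#       with open(out_file, 'a') as f:
	#           f.write('%d,%f,%f,%f,%f,%f\n' % (
	#               minority_size, threshold,
	#               precision_score(y_true, y_pred), recall_score(y_true, y_pred),
	#               f1_score(y_true, y_pred), roc_auc_score(y_true, y_prob.ravel())))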
	epochTimer.write_timings()

	logger.log_time('RESULTS SAVED')
	logger.log_time('ENDING RUN ' + str(run))


logger.log_time('ALL RUNS COMPLETED')
logger.write_to_file('logs.txt')
logger.mark_dir_complete('./')