Example #1
import os
import shutil

import Metrics  # project-local helper assumed to provide saveConfig/copyDirectory


def update_res(config_results, mydir, latest):
	# Replace the "latest" snapshot directory with the current run's output.
	if os.path.exists(latest):
		shutil.rmtree(latest)

	# Serialize the accumulated configuration results as a JavaScript array.
	# join() also handles an empty result list, which the original indexing did not.
	text = 'var configs = [' + ','.join(str(config) for config in config_results) + '];'

	# Rewrite config.js from scratch, then refresh the snapshot directory.
	if os.path.exists('config.js'):
		os.remove('config.js')

	Metrics.saveConfig('config.js', text)
	Metrics.copyDirectory(mydir, latest)
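
A minimal usage sketch; the directory names and result values here are illustrative, not from the original project:

# Hypothetical call site: write config.js and refresh the "latest" snapshot.
config_results = [{'config': {'opt_learning': 0.1}, 'results': {'mse': 0.02}}]
update_res(config_results, mydir='results/run-01', latest='results/latest')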
Example #2
					# MSE curves (training and validation)
					Metrics.plot_mse_curve(np.array(error_train), np.array(error_valid), configDir)

					# Area under the ROC curve
					roc_area = Metrics.plot_roc_curve(targetByClass, prob_predictions, configDir)

					# Accuracy on the test set (percentage of correct predictions)
					accuracy = ((len(base['testing']['data']) - errors_total) / len(base['testing']['data'])) * 100

					print("accuracy:", accuracy, '%')
					print('errors:', errors_total, 'of', len(base['testing']['data']))

					configDesc = {
						'opt_samp': opt_samp.name,
						'opt_learning': opt_learning,
						'activation_function_options': opt_actvfunc,
						'topology_options': opt_top,
					}

					# Note: the value stored under the key 'precision' is the accuracy computed above.
					current_config_result = {
						'config': configDesc,
						'results': {
							'mse': test_mse,
							'confusion': {
								'true_positive': confusion_matrix_percentage[0][0],
								'false_positive': confusion_matrix_percentage[0][1],
								'false_negative': confusion_matrix_percentage[1][0],
								'true_negative': confusion_matrix_percentage[1][1],
							},
							'roc': roc_area,
							'precision': accuracy,
						},
					}
					config_results.append(current_config_result.copy())

					Metrics.saveConfig(os.path.join(configDir, 'config-results.json'), current_config_result)

					nConfig += 1
					current_config_result = {}

					# Restore the datasets so the next sampling option starts from the original data
					training = training_bkp.copy()
					validation = validation_bkp.copy()
					testing = testing_bkp.copy()

					update_res(config_results, mydir, latest)

	update_res(config_results, mydir, latest)
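
Note the two call sites: update_res runs inside the loop after each configuration and once more after all loops complete, so config.js and the "latest" snapshot reflect partial results even if a long grid search is interrupted.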