Example #1
def run_with_setting(hyperparams, argv=None, batch_size=None):
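	# Trains a convnet driven by the hyperparams dict; the sentinel files
	# 'stop_asap.keunwoo' and 'will_stop.keunwoo' control interactive stopping.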
	# f = open('will_stop.keunwoo', 'w')
	# f.close()
	if os.path.exists('stop_asap.keunwoo'):
		os.remove('stop_asap.keunwoo')
	# pick top-N from label matrix
	dim_labels = hyperparams['dim_labels']

	if hyperparams['is_LDA']: 
		best_result = 1.0 # mse
		criteria = 'mse'
	else:
		best_result = 0.0 # auc
		criteria = 'auc'
	is_getting_better = get_getting_beter_func(hyperparams)

	# label_matrix = np.load(PATH_DATA + FILE_DICT['sorted_merged_label_matrix'])
	# label_matrix = label_matrix[:, :dim_labels]
	hdf_xs = io.load_x(hyperparams['tf_type'], is_test=hyperparams['is_test'])
	hdf_ys = io.load_y(dim_labels, is_test=hyperparams['is_test'], merged=hyperparams['merged'], is_LDA=hyperparams['is_LDA'], is_LDA_normalised=hyperparams['is_LDA_normalised'])
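	# the HDF data comes in 14 chunks: 12 for training, 1 for validation, 1 for testing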
	hdf_train_xs = hdf_xs[:12]
	hdf_valid_xs = hdf_xs[12:13]
	hdf_test_xs = hdf_xs[13:]
	hdf_train_ys = hdf_ys[:12]
	hdf_valid_ys = hdf_ys[12:13]
	hdf_test_ys = hdf_ys[13:]

	# train_x, valid_x, test_x = io.load_x(hyperparams['tf_type'])
	# train_y, valid_y, test_y = io.load_y(dim_labels)
	if hyperparams['is_test']:
		pdb.set_trace()
		# num_data_in_test = 256
		# train_x = train_x[:num_data_in_test]
		# valid_x = valid_x[:num_data_in_test]
		# test_x  = test_x[:num_data_in_test]
		# train_y = train_y[:num_data_in_test]
		# valid_y = valid_y[:num_data_in_test]
		# test_y  = test_y[:num_data_in_test]
		# shuffle = False
		# num_sub_epoch = 1
		
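	# input tensors are shaped (num_samples, num_channels, height, width)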
	hyperparams['height_image'] = hdf_train_xs[0].shape[2]
	hyperparams["width_image"]  = hdf_train_xs[0].shape[3]
	
	if hyperparams['model_type'] == 'multi_input':
		mfcc_hdf_xs = io.load_x('mfcc', is_test=hyperparams['is_test'])
		mfcc_hdf_train_xs = mfcc_hdf_xs[:12]
		mfcc_hdf_valid_xs = mfcc_hdf_xs[12:13]
		mfcc_hdf_test_xs  = mfcc_hdf_xs[13:]
		hyperparams['mfcc_height_image'] = mfcc_hdf_train_xs[0].shape[2]
		hyperparams['mfcc_width_image']  = mfcc_hdf_train_xs[0].shape[3]


	hp_manager = hyperparams_manager.Hyperparams_Manager()

	# name, path, ...
	nickname = hp_manager.get_name(hyperparams)
	timename = time.strftime('%m-%d-%Hh%M')
	if hyperparams["is_test"]:
		model_name = 'test_' + nickname
	else:
		model_name = timename + '_' + nickname
	if hyperparams['resume'] != '':
		model_name = model_name + '_from_' + hyperparams['resume']
	hp_manager.save_new_setting(hyperparams)
	print '-'*60
	print 'model name: %s' % model_name
	model_name_dir = model_name + '/'
	model_weight_name_dir = 'w_' + model_name + '/'
	fileout = model_name + '_results'
	
	# build model
	model = my_keras_models.build_convnet_model(setting_dict=hyperparams)
	if not os.path.exists(PATH_RESULTS + model_name_dir):
		os.mkdir(PATH_RESULTS + model_name_dir)
		os.mkdir(PATH_RESULTS + model_name_dir + 'images/')
		os.mkdir(PATH_RESULTS + model_name_dir + 'plots/')
		os.mkdir(PATH_RESULTS_W + model_weight_name_dir)
	hp_manager.write_setting_as_texts(PATH_RESULTS + model_name_dir, hyperparams)
	hp_manager.print_setting(hyperparams)
	model.summary()
	# prepare callbacks
	keras_plot(model, to_file=PATH_RESULTS + model_name_dir + 'images/'+'graph_of_model_'+hyperparams["!memo"]+'.png')
	# checkpointer = keras.callbacks.ModelCheckpoint(filepath=PATH_RESULTS_W + model_weight_name_dir + "weights_best.hdf5", 
	# 												 monitor='val_acc',
	# 												verbose=1, 
	# 							             		save_best_only=True)
	weight_image_monitor = my_keras_utils.Weight_Image_Saver(PATH_RESULTS + model_name_dir + 'images/')
	patience = 100
	if hyperparams["is_test"] is True:
		patience = 99999999
	early_stopping = keras.callbacks.EarlyStopping(monitor='val_acc', 
														patience=patience, 
														verbose=0)
	if batch_size == None:
		batch_size = 16
	
	if hyperparams['model_type'] == 'vgg_original':
		batch_size = (batch_size * 3)/5
	# ready to run
	if hyperparams['debug']:
		pdb.set_trace()
	print '--- %s train starts. Remove will_stop.keunwoo to continue learning after %d epochs ---' % (model_name, hyperparams["num_epoch"])
	
	num_epoch = hyperparams["num_epoch"]
	total_epoch = 0
	
	callbacks = [weight_image_monitor]

	total_history = {'loss':[], 'val_loss':[], 'acc':[], 'val_acc':[]}

	# total_label_count = np.sum([hdf_train.shape[0]*hdf_train.shape[1] for hdf_train in hdf_train_ys]) 
	# total_zeros = 
	# print 'With predicting all zero, acc is %0.6f' % ((total_label_count - np.sum(train_y))/float(total_label_count))

	if hyperparams['resume'] != '':
		if os.path.exists(PATH_RESULTS_W + 'w_' + hyperparams['resume']):
			model.load_weights(PATH_RESULTS_W + 'w_' + hyperparams['resume'] + '/weights_best.hdf5')
		if os.path.exists(PATH_RESULTS + hyperparams['resume'] + '/total_history.cP'):
			previous_history = cP.load(open(PATH_RESULTS + hyperparams['resume'] + '/total_history.cP', 'r'))
			print 'previously learned weight: %s is loaded ' % hyperparams['resume']
			append_history(total_history, previous_history)
			best_result = min(total_history[criteria]) if criteria == 'mse' else max(total_history[criteria]) # lower is better for mse, higher for auc
	
	if not hyperparams['do_not_learn']:
		my_plots.save_model_as_image(model, save_path=PATH_RESULTS + model_name_dir + 'images/', 
											filename_prefix='local_INIT', 
											normalize='local', 
											mono=True)
		my_plots.save_model_as_image(model, save_path=PATH_RESULTS + model_name_dir + 'images/', 
											filename_prefix='global_INIT', 
											normalize='global', 
											mono=True)
		# run
		print '--TEST FLIGHT--'
		if hyperparams['model_type'] in ['multi_task', 'multi_input']: # multi_input assumes multi_task.
			if hyperparams['model_type'] == 'multi_task':
				fit_dict = get_fit_dict(hdf_train_xs[-1][-256:], hdf_train_ys[-1][-256:], hyperparams['dim_labels'])
			else:
				fit_dict = get_fit_dict(hdf_train_xs[-1][-256:], hdf_train_ys[-1][-256:], hyperparams['dim_labels'], mfcc_train_x=mfcc_hdf_train_xs[-1][-256:])
			# pdb.set_trace()
			model.fit(fit_dict, batch_size=batch_size, nb_epoch=1, shuffle='batch')
		else:
			model.fit(hdf_train_xs[-1][-256:], hdf_train_ys[-1][-256:],
					validation_data=(hdf_valid_xs[0][:512], hdf_valid_ys[0][:512]),
					batch_size=batch_size,
					nb_epoch=1,
					show_accuracy=hyperparams['isClass'],
					callbacks=callbacks,
					shuffle='batch')
		print '--TEST FLIGHT DONE: %s--' % model_name
		total_epoch_count = 0
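		# main loop: one fit() per HDF chunk ('sub-epoch'); stop_asap.keunwoo stops
		# immediately, will_stop.keunwoo stops once num_epoch epochs are complete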
		while True:
			for sub_epoch_idx, (train_x, train_y) in enumerate(zip(hdf_train_xs, hdf_train_ys)):
				total_epoch_count += 1
				print '      --- I will check stop_asap.keunwoo'
				if os.path.exists('stop_asap.keunwoo') and total_epoch_count > 1:
					print '      --- stop_asap.keunwoo found. will stop now.'
					break
				print '      --- stop_asap.keunwoo NOT found. keep going on..'

				if hyperparams['model_type'] == 'multi_input':
					mfcc_train_x = mfcc_hdf_train_xs[sub_epoch_idx]
				else:
					mfcc_train_x = None
				# early_stop should watch overall AUC rather than val_loss or val_acc
				# [run]
				if hyperparams['model_type'] in ['multi_task', 'multi_input']:
					fit_dict = get_fit_dict(train_x, train_y, hyperparams['dim_labels'], mfcc_train_x=mfcc_train_x)
					loss_history = model.fit(fit_dict,
											batch_size=batch_size,
											nb_epoch=1,
											shuffle='batch')
				else:
					loss_history = model.fit(train_x, train_y, validation_data=(hdf_valid_xs[0][:2048], hdf_valid_ys[0][:2048]), 
											batch_size=batch_size,
											nb_epoch=1, 
											show_accuracy=hyperparams['isClass'], 
											verbose=1, 
											callbacks=callbacks,
											shuffle='batch')
				# [validation]
				if sub_epoch_idx not in [0, 6]: # validation with subset
					if hyperparams['model_type'] in ['multi_task', 'multi_input']:
						fit_dict = get_fit_dict(hdf_valid_xs[-1], hdf_valid_ys[-1], hyperparams['dim_labels'], mfcc_train_x=mfcc_hdf_valid_xs[-1])
						predicted_dict = model.predict(fit_dict, batch_size=batch_size)
						predicted = merge_multi_outputs(predicted_dict)
						val_loss_here = model.evaluate(fit_dict, batch_size=batch_size)
						print 'val_loss:%f' % val_loss_here
					else:
						valid_x, valid_y = (hdf_valid_xs[0][:8092], hdf_valid_ys[0][:8092])
						predicted = model.predict(valid_x, batch_size=batch_size)
						
				else: # validation with all
					print ' * Compute AUC with full validation data for model: %s.' % model_name
					if hyperparams['model_type'] in ['multi_task', 'multi_input']:
						valid_y = hdf_valid_ys[0][:] # I know I'm using only one set for validation.
						fit_dict = get_fit_dict(hdf_valid_xs[-1], hdf_valid_ys[-1], hyperparams['dim_labels'], mfcc_train_x=mfcc_hdf_valid_xs[-1])
						predicted_dict = model.predict(fit_dict, batch_size=batch_size)
						predicted = merge_multi_outputs(predicted_dict)
						val_loss_here = model.evaluate(fit_dict, batch_size=batch_size)
						print 'val_loss:%f' % val_loss_here
					else:
						predicted = np.zeros((0, dim_labels))
						valid_y = np.zeros((0, dim_labels))
						for valid_x_partial, valid_y_partial in zip(hdf_valid_xs, hdf_valid_ys):
							predicted = np.vstack((predicted, model.predict(valid_x_partial, batch_size=batch_size)))
							valid_y = np.vstack((valid_y, valid_y_partial))

				# [check if should stop]
				val_result = evaluate_result(valid_y, predicted, hyperparams)
				history = {}
				
				history[criteria] = [val_result[criteria]]
				print '[%d] %s: %f' % (total_epoch_count, criteria, val_result[criteria])
					# history['coverage_error'] = [val_result['coverage_error']]
					# history['label_ranking_average_precision_score'] = [val_result['label_ranking_average_precision_score']]
					# history['label_ranking_loss'] = [val_result['label_ranking_loss']]
				
				if hyperparams['model_type'] in ['multi_task', 'multi_input']:
					history['val_loss'] = [val_loss_here]
								
				
				if is_getting_better(best_result, val_result[criteria]):
					print ', which is a new record! previous best was %f (%s)' % (best_result, model_name)
					best_result = val_result[criteria]
					model.save_weights(filepath=PATH_RESULTS_W + model_weight_name_dir + "weights_best.hdf5", 
										overwrite=True)

				else:
					print 'Keeping old %s record, %f' % (criteria, best_result)
				
				append_history(total_history, history)
				append_history(total_history, loss_history.history)
				
				my_plots.export_list_png(total_history[criteria], out_filename=PATH_RESULTS + model_name_dir + 'plots/' + ('plot_%s.png' % criteria), title=model_name + criteria + '\n'+hyperparams['!memo'] )
				
				my_plots.export_history(total_history['loss'], total_history['val_loss'], 
													acc=total_history['acc'], 
													val_acc=total_history['val_acc'], 
													out_filename=PATH_RESULTS + model_name_dir + 'plots/' + 'loss_plots.png')
		
			
			print '[%d], %d-th of %d epoch is complete, %s:%f' % (total_epoch_count, total_epoch, num_epoch, criteria, val_result[criteria])
			total_epoch += 1

			if os.path.exists('stop_asap.keunwoo'):
				os.remove('stop_asap.keunwoo')
				break			
			if os.path.exists('will_stop.keunwoo'):	
				if total_epoch > num_epoch:
					break
				else:
					print ' *** will go for %d epochs' % (num_epoch - total_epoch)
			else:
				print ' *** will go for another one epoch. '
				print ' *** $ touch will_stop.keunwoo to stop at the end of this, otherwise it will be endless.'
	# [summarise]
	if hyperparams["debug"] == True:
		pdb.set_trace()
	##################################
	# test with last weights
	predicted = np.zeros((0, dim_labels))
	test_y = np.zeros((0, dim_labels))

	for test_idx, (test_x_partial, test_y_partial) in enumerate(zip(hdf_test_xs, hdf_test_ys)):
		if hyperparams['model_type'] == 'multi_input':
			mfcc_test_x_partial = mfcc_hdf_test_xs[test_idx]
		else:
			mfcc_test_x_partial = None
		if hyperparams['model_type'] in ['multi_task', 'multi_input']:
			fit_dict = get_fit_dict(test_x_partial, test_y_partial, hyperparams['dim_labels'], mfcc_train_x=mfcc_test_x_partial)
			predicted_dict = model.predict(fit_dict, batch_size=batch_size)
			predicted = np.vstack((predicted, merge_multi_outputs(predicted_dict)))
		else:
			predicted = np.vstack((predicted, model.predict(test_x_partial, batch_size=batch_size)))
		test_y = np.vstack((test_y, test_y_partial))
	eval_result_final = evaluate_result(test_y, predicted, hyperparams)
	print '.'*60
	for key in sorted(eval_result_final.keys()):
		print key, eval_result_final[key]
	print '.'*60
	
	#####################
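	# test again, this time with the best weights (reloaded below if needed)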

	if not hyperparams['is_test']:
		if best_result != val_result[criteria]: # reload weights only if the last epoch was not the best
			print 'Load best weight for test sets'
			model.load_weights(PATH_RESULTS_W + model_weight_name_dir + "weights_best.hdf5") 
	
	predicted = np.zeros((0, dim_labels))
	test_y = np.zeros((0, dim_labels))

	for test_idx, (test_x_partial, test_y_partial) in enumerate(zip(hdf_test_xs, hdf_test_ys)):
		if hyperparams['model_type'] == 'multi_input':
			mfcc_test_x_partial = mfcc_hdf_test_xs[test_idx]
		else:
			mfcc_test_x_partial = None
		if hyperparams['model_type'] in ['multi_task', 'multi_input']:
			fit_dict = get_fit_dict(test_x_partial, test_y_partial, hyperparams['dim_labels'], mfcc_train_x=mfcc_test_x_partial)
			predicted_dict = model.predict(fit_dict, batch_size=batch_size)
			predicted = np.vstack((predicted, merge_multi_outputs(predicted_dict)))
		else:
			predicted = np.vstack((predicted, model.predict(test_x_partial, batch_size=batch_size)))
		test_y = np.vstack((test_y, test_y_partial))
	eval_result_final = evaluate_result(test_y, predicted, hyperparams)
	print '.'*60
	for key in sorted(eval_result_final.keys()):
		print key, eval_result_final[key]
	print '.'*60
	
	# save results

	cP.dump(total_history, open(PATH_RESULTS + model_name_dir + 'total_history.cP', 'w'))
	# np.save(PATH_RESULTS + model_name_dir + 'loss_testset.npy', loss_testset)
	np.save(PATH_RESULTS + model_name_dir + 'predicted_and_truths_result.npy', [predicted, test_y])
	np.save(PATH_RESULTS + model_name_dir + 'weights_changes.npy', np.array(weight_image_monitor.weights_changes))

	# ADD weight change saving code
	if total_history != {}:
		# max_auc = np.max(total_history['auc'])
		best_batch = (np.argmin(total_history[criteria]) if criteria == 'mse' else np.argmax(total_history[criteria])) + 1
		num_run_epoch = len(total_history[criteria])
		oneline_result = '%6.4f, %s %d_of_%d, %s' % (best_result, criteria, best_batch, num_run_epoch, model_name)
		with open(PATH_RESULTS + model_name_dir + oneline_result, 'w') as f:
			pass
		with open(PATH_RESULTS + '%s_%s_%s_%06.4f_at_(%d_of_%d)_%s' % \
			(timename, hyperparams["loss_function"], criteria, best_result, best_batch, num_run_epoch, nickname), 'w'):
			pass
		with open('one_line_log.txt', 'a') as f:
			f.write(oneline_result)
			f.write(' ' + ' '.join(argv) + '\n')
	else:
		max_auc = 0.0
	print '========== DONE: %s ==========' % model_name
	return best_result
Example #2
def run_with_setting(hyperparams, argv):
	print '#'*60
	# inputs: hyperparams (TR_CONST) and sys.argv
	# -------------------------------
	if os.path.exists('stop_asap.keunwoo'):
		os.remove('stop_asap.keunwoo')
	
	if hyperparams["is_test"]:
		print '==== This is a test, to quickly check the code. ===='
		print 'executed by $ ' + ' '.join(argv)
	
	mse_history = []
	# label matrix
	dim_latent_feature = hyperparams["dim_labels"]
	# label_matrix_filename = (FILE_DICT["mood_latent_matrix"] % dim_latent_feature)
	label_matrix_filename = (FILE_DICT["mood_latent_tfidf_matrix"] % dim_latent_feature) # tfidf is better!
	
	if os.path.exists(PATH_DATA + label_matrix_filename):
		label_matrix = np.load(PATH_DATA + label_matrix_filename) #np matrix, 9320-by-100
	else:
		print "let's create a new mood-latent feature matrix"
		import main_prepare
		mood_tags_matrix = np.load(PATH_DATA + label_matrix_filename) # np matrix, 9320-by-100
		label_matrix = main_prepare.get_LDA(X=mood_tags_matrix, 
											num_components=dim_latent_feature, 
											show_topics=False)
		np.save(PATH_DATA + label_matrix_filename, label_matrix)
	print 'size of label matrix:', label_matrix.shape

	# load dataset
	train_x, valid_x, test_x = my_utils.load_all_sets_from_hdf(tf_type=hyperparams["tf_type"],
																n_dim=dim_latent_feature,
																task_cla=hyperparams['isClass'])
	# *_y is not correct - 01 Jan 2016. Use numpy files directly.
	train_y, valid_y, test_y = my_utils.load_all_labels(n_dim=dim_latent_feature, 
														num_fold=10, 
														clips_per_song=3)
	if hyperparams["is_test"]:
		train_x, valid_x, test_x, train_y, valid_y, test_y = [ele[:64] for ele in [train_x, valid_x, test_x, train_y, valid_y, test_y]]

	threshold_label = 1.0
	if hyperparams['isClass']:
		train_y = (train_y>=threshold_label).astype(int)
		valid_y = (valid_y>=threshold_label).astype(int)
		test_y = (test_y>=threshold_label).astype(int)
	
	# print 'temporary came back with numpy loading'
	# if hyperparams["debug"]:
	# 	num_train_songs = 30
	# else:
	# 	num_train_songs = 1000
	# train_x, train_y, valid_x, valid_y, test_x, test_y = my_utils.load_all_sets(label_matrix, 
	# 																			hyperparams=hyperparams)

	hyperparams["height_image"] = train_x.shape[2]
	hyperparams["width_image"]  = train_x.shape[3]
	if hyperparams["debug"]:
		pdb.set_trace()
	
	moodnames = cP.load(open(PATH_DATA + FILE_DICT["moodnames"], 'r')) #list, 100
	# train_x : (num_samples, num_channel, height, width)	
	hp_manager = hyperparams_manager.Hyperparams_Manager()
	nickname = hp_manager.get_name(hyperparams)
	timename = time.strftime('%m-%d-%Hh%M')
	if hyperparams["is_test"]:
		model_name = 'test_' + nickname
	else:
		model_name = timename + '_' + nickname
	hp_manager.save_new_setting(hyperparams)
	print '-'*60
	print 'model name: %s' % model_name
	model_name_dir = model_name + '/'
	model_weight_name_dir = 'w_' + model_name + '/'
	fileout = model_name + '_results'
	 	
	model = my_keras_models.build_convnet_model(setting_dict=hyperparams)
	model.summary()
	if not os.path.exists(PATH_RESULTS + model_name_dir):
		os.mkdir(PATH_RESULTS + model_name_dir)
		os.mkdir(PATH_RESULTS + model_name_dir + 'images/')
		os.mkdir(PATH_RESULTS + model_name_dir + 'plots/')
		os.mkdir(PATH_RESULTS_W + model_weight_name_dir)
	
	hp_manager.write_setting_as_texts(PATH_RESULTS + model_name_dir, hyperparams)
	hp_manager.print_setting(hyperparams)

	keras_plot(model, to_file=PATH_RESULTS + model_name_dir + 'images/'+'graph_of_model_'+hyperparams["!memo"]+'.png')
	#prepare callbacks
	weight_image_monitor = my_keras_utils.Weight_Image_Saver(PATH_RESULTS + model_name_dir + 'images/')
	patience = 3
	if hyperparams["is_test"] is True:
		patience = 99999999
	if hyperparams["isRegre"]:
		value_to_monitor = 'val_loss'
	else:
		value_to_monitor = 'val_acc'
		#history = my_keras_utils.History_Regression_Val()
	# early_stopping = keras.callbacks.EarlyStopping(monitor=value_to_monitor, 
	# 												patience=patience, 
	# 												verbose=0)
	
	# other constants
	batch_size = 16
	# if hyperparams['model_type'] == 'vgg_original':
	# 	batch_size = (batch_size * 3)/5

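	# baseline: predict with the untrained model to sanity-check targets vs. predictions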
	predicted = model.predict(test_x, batch_size=batch_size)
	if hyperparams['debug']:
		pdb.set_trace()
	print 'mean of target value:'
	if hyperparams['isRegre']:
		print np.mean(test_y, axis=0)
	else:
		print np.sum(test_y, axis=0)
	print 'mean of predicted value:'
	if hyperparams['isRegre']:
		print np.mean(predicted, axis=0)
	else:
		print np.sum(predicted, axis=0)
	print 'mse with just predicting average is %f' % np.mean((test_y - np.mean(test_y, axis=0))**2)
	np.save(PATH_RESULTS + model_name_dir + 'predicted_and_truths_init.npy', [predicted[:len(test_y)], test_y[:len(test_y)]])
	# train!
	print '--- train starts. Remove will_stop.keunwoo to continue learning after %d epochs ---' % hyperparams["num_epoch"]
	f = open('will_stop.keunwoo', 'w')
	f.close()
	total_history = {}
	num_epoch = hyperparams["num_epoch"]
	total_epoch = 0
	
	callbacks = [weight_image_monitor]
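	# 0.5 is the initial MSE threshold; weights are saved whenever validation MSE improves on it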
	best_mse = 0.5

	while True:
		# [run]
		if os.path.exists('stop_asap.keunwoo'):
			print ' stop by stop_asap.keunwoo file'
			break
		history = model.fit(train_x, train_y, validation_data=(valid_x, valid_y), 
											batch_size=batch_size, 
											nb_epoch=1, 
											show_accuracy=hyperparams['isClass'], 
											verbose=1, 
											callbacks=callbacks,
											shuffle='batch')
		my_utils.append_history(total_history, history.history)
		# [validation]
		predicted = model.predict(valid_x, batch_size=batch_size)
		val_result = evaluate_result(valid_y, predicted) # mse
		if val_result['mse'] < best_mse:
			model.save_weights(PATH_RESULTS_W + model_weight_name_dir + "weights_best.hdf5", overwrite=True)
			best_mse = val_result['mse']
		mse_history.append(val_result['mse'])

		print '%d-th of %d epoch is complete' % (total_epoch, num_epoch)
		total_epoch += 1
		
		if os.path.exists('will_stop.keunwoo'):
			if total_epoch > num_epoch:
				loss_testset = model.evaluate(test_x, test_y, show_accuracy=False, batch_size=batch_size)
				break
		else:
			print ' *** will go for another one epoch. '
			print ' *** $ touch will_stop.keunwoo to stop at the end of this, otherwise it will be endless.'
	#
	best_batch = np.argmin(mse_history)+1

	model.load_weights(PATH_RESULTS_W + model_weight_name_dir + "weights_best.hdf5") 

	predicted = model.predict(test_x, batch_size=batch_size)
	print 'predicted example using best model'
	print predicted[:10]
	print 'and truths'
	print test_y[:10]
	# save results
	np.save(PATH_RESULTS + model_name_dir + fileout + '_history.npy', [total_history['loss'], total_history['val_loss']])
	np.save(PATH_RESULTS + model_name_dir + fileout + '_loss_testset.npy', loss_testset)
	np.save(PATH_RESULTS + model_name_dir + 'predicted_and_truths_result.npy', [predicted, test_y])
	# np.save(PATH_RESULTS + model_name_dir + 'weights_changes.npy', np.array(weight_image_monitor.weights_changes))

	# ADD weight change saving code
	my_plots.export_history(total_history['loss'], total_history['val_loss'],
											acc=None, 
											val_acc=None, 
											out_filename=PATH_RESULTS + model_name_dir + 'plots/' + 'plots.png')
	
	
	min_loss = np.min(total_history[value_to_monitor])
	best_batch = np.argmin(total_history[value_to_monitor])+1
	num_run_epoch = len(total_history[value_to_monitor])
	oneline_result = '%s, %6.4f, %d_of_%d, %s' % (value_to_monitor, min_loss, best_batch, num_run_epoch, model_name)
	with open(PATH_RESULTS + model_name_dir + oneline_result, 'w') as f:
		pass
	with open(PATH_RESULTS + '%s_%s_%s_%06.4f_at_(%d_of_%d)_mse_%06.4f_%s' % \
		(timename, hyperparams["loss_function"], value_to_monitor, min_loss, best_batch, num_run_epoch, best_mse, nickname), 'w'):
		pass
	with open('one_line_log.txt', 'a') as f:
		f.write(oneline_result)
		f.write(' ' + ' '.join(argv) + '\n')
	print '========== DONE: %s ==========' % model_name
	return min_loss
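
For reference, below is a minimal sketch of how run_with_setting might be invoked, following the first example's signature. Every hyperparams key is one the examples above actually read; the values, and the __main__ wrapper, are illustrative assumptions rather than the original project's settings.

import sys

if __name__ == '__main__':
	# every key below is read somewhere in the examples above;
	# the values are illustrative assumptions only
	hyperparams = {
		'tf_type': 'melgram',            # assumed time-frequency representation name
		'dim_labels': 100,
		'is_test': False,
		'is_LDA': False,
		'is_LDA_normalised': False,
		'merged': False,
		'model_type': 'vgg_original',
		'isClass': True,
		'debug': False,
		'resume': '',
		'do_not_learn': False,
		'num_epoch': 10,
		'loss_function': 'binary_crossentropy',
		'!memo': 'example_run',
	}
	best = run_with_setting(hyperparams, argv=sys.argv, batch_size=16)
	print 'best %s: %f' % ('mse' if hyperparams['is_LDA'] else 'auc', best)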