Example #1
class EarlyStopping(object):
    # The attributes initialised here are exactly the ones used by __call__ below.
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            # remember the best validation loss seen so far and a copy of the weights
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            # no improvement for `patience` epochs: restore best weights and stop training
            print("Early stopping.")
            print("Best valid loss was {:.6f} at epoch {}.".format(
                self.best_valid, self.best_valid_epoch))
            nn.load_params_from(self.best_weights)
            raise StopIteration()
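This handler is meant to be registered with a NeuralNet's on_epoch_finished hook, exactly as Example #5 below does; raising StopIteration from the callback is how nolearn ends fit() early. A minimal sketch of the wiring, with placeholder layer sizes and update settings that are not taken from the original examples:

from lasagne import layers
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 64),              # placeholder feature dimension
    hidden_num_units=100,
    output_num_units=2,
    output_nonlinearity=softmax,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    on_epoch_finished=[EarlyStopping(patience=100)],
    max_epochs=500,
    verbose=1,
)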
Example #2
    dense_num_units=512,
    dense_nonlinearity=lasagne.nonlinearities.rectify,
    # dropout2
    dropout2_p=0.5,

    # output
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=2,
    # optimization method params
    update=nesterov_momentum,
    update_learning_rate=0.05,
    update_momentum=0.9,
    max_epochs=10,
    verbose=1)

# Train the network
nn = net1.fit(X, Y)

# Visualize the learned filters of the second convolutional layer
visualize.plot_conv_weights(net1.layers_['conv2d2'])

# Get the learned weights for each layer
result = net1.get_all_params_values()
conv2d1 = result['conv2d1']
maxpool1 = result['maxpool1']
conv2d2 = result['conv2d2']
maxpool2 = result['maxpool2']
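get_all_params_values() returns an ordered dict keyed by layer name, where each value is the list of that layer's parameter arrays (typically [W, b] for conv and dense layers, and an empty list for layers such as max-pooling that have no parameters). A small inspection sketch, assuming the net1 trained above:

# Print the parameter shapes of every layer, then look at the first conv layer's filters.
for name, param_list in result.items():
    print("{}: {}".format(name, [p.shape for p in param_list]))

W_conv1 = conv2d1[0]   # filter weights of 'conv2d1':
                       # (num_filters, num_input_channels, filter_rows, filter_cols)
print(W_conv1.shape)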
Example #3
    X_test = data_preprocess_test(X_test)
    total_test_time_points = len(X_test) // NO_TIME_POINTS
    remainder_test_points = len(X_test) % NO_TIME_POINTS

    no_rows = total_test_time_points * NO_TIME_POINTS
    X_test = X_test[0:no_rows, :]

    X_test = X_test.transpose()
    X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
    X_test = np.asarray(X_test_Samples)


###########################################################################
#######get predictions and write to files for series 9 and series 10#######
    print("Testing subject%d...." %(subject))
    params = net.get_all_params_values()
    learned_weights = net.load_params_from(params)
    probabilities = net.predict_proba(X_test)

    sub9 = 'subj{0}_series{1}'.format(subject, 9)
    data_len9 = test_dict[sub9]
    total_time_points9 = data_len9 // NO_TIME_POINTS
    remainder_data9 = data_len9 % NO_TIME_POINTS

    sub10 = 'subj{0}_series{1}'.format(subject, 10)
    data_len10 = test_dict[sub10]
    total_time_points10 = data_len10 // NO_TIME_POINTS
    remainder_data10 = data_len10 % NO_TIME_POINTS

    total_test_points = total_time_points9+total_time_points10
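For reference, a small shape check of the windowing done above, under assumed dimensions (14 EEG channels and NO_TIME_POINTS=70, the values used in Example #6): after trimming, transposing and splitting, X_test ends up as (n_windows, channels, NO_TIME_POINTS), which is the (batch, channels, time) layout the Conv1D input layer expects.

import numpy as np

NO_TIME_POINTS = 70
raw = np.random.rand(1000, 14)                 # placeholder: 1000 samples x 14 channels
n_windows = len(raw) // NO_TIME_POINTS         # 14 full windows; remainder is dropped
raw = raw[:n_windows * NO_TIME_POINTS, :]
windows = np.asarray(np.split(raw.transpose(), n_windows, axis=1))
print(windows.shape)                           # (14, 14, 70) == (n_windows, channels, NO_TIME_POINTS)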
Example #5
def build_dbn():
	cols = ['Start']

	t = 1
	channels = 14
	batch_size = None  # None = arbitrary batch size
	hidden_layer_size = 100
	N_EVENTS = 1
	max_epochs = 500
	NO_TIME_POINTS = 177
	
	ids_tot = []
	pred_tot = []
	test_dict = dict()
	
	test_total = 0
	
	net = NeuralNet(
		layers=[
			('input', layers.InputLayer),
			('conv1', layers.Conv1DLayer),
			('conv2', layers.Conv1DLayer),
			('pool1', layers.MaxPool1DLayer),
			('dropout2', layers.DropoutLayer),
			('hidden4', layers.DenseLayer),
			('dropout3', layers.DropoutLayer),
			('hidden5', layers.DenseLayer),
			('dropout4', layers.DropoutLayer),
			('output', layers.DenseLayer),
		],
		input_shape=(batch_size, channels, NO_TIME_POINTS),
		conv1_num_filters=4, conv1_filter_size=1,
		conv1_nonlinearity=None,
		conv2_num_filters=8, conv2_filter_size=5,
		pool1_pool_size=4,
		dropout2_p=0.5, hidden4_num_units=hidden_layer_size,
		dropout3_p=0.3, hidden5_num_units=hidden_layer_size,
		dropout4_p=0.2, output_num_units=N_EVENTS,
		output_nonlinearity=sigmoid,
		
		batch_iterator_train = BatchIterator(batch_size=1000),
		batch_iterator_test = BatchIterator(batch_size=1000),
		
		y_tensor_type=theano.tensor.matrix,
		update=nesterov_momentum,
		update_learning_rate=theano.shared(float(0.03)),
		update_momentum=theano.shared(float(0.9)),
		
		objective_loss_function=loss,
		regression=True,

		on_epoch_finished=[
			AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
			AdjustVariable('update_momentum', start=0.9, stop=0.999),
			EarlyStopping(patience=100),	
		],

		max_epochs=max_epochs,
		verbose=1,
		)
	
	# load trial dataset
	dic = pickle.load(open('datapickled/traildata.pickle', 'rb'))
	
	X = dic['X']
	y = dic['y']
	
	# process training data
	total_time_points = len(X) // NO_TIME_POINTS
	no_rows = total_time_points * NO_TIME_POINTS

	X = X[0:no_rows, :]
	
	X = X.transpose()
	X_Samples = np.split(X, total_time_points, axis=1)
	X = np.asarray(X_Samples)
	
	y = y[0:no_rows, :]
	y = y[::NO_TIME_POINTS, :]
	y = y.astype('float32')
	
	net.fit(X,y)
	
	tip = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
	
	# Save the net
	with open('net/net'+tip+'.pickle', 'wb') as f:
		pickle.dump(net, f, -1)
	
	plot(net)

	# Load test data
	dic = pickle.load(open('datapickled/testdata2.pickle', 'rb'))
	X_test = dic['X_test']
	ids_tot = dic['ids_tot']
	test_dict = dic['test_dict']
	test_total = dic['test_total']

	####process test data####
	print("Creating prediction file ... ")
	
	total_test_len = len(X_test)
	
	total_test_time_points = len(X_test) // NO_TIME_POINTS
	remainder_test_points = len(X_test) % NO_TIME_POINTS
	
	no_rows = total_test_time_points * NO_TIME_POINTS
	X_test = X_test[0:no_rows, :]

	X_test = X_test.transpose()
	X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
	X_test = np.asarray(X_test_Samples)
	
	# Evaluate test data
	print("Testing subject 0....")
	# reload the learned parameter values into the net before predicting
	params = net.get_all_params_values()
	net.load_params_from(params)
	probabilities = net.predict_proba(X_test)
	
	total_test_points = total_test_len // NO_TIME_POINTS
	remainder_data = total_test_len % NO_TIME_POINTS
	for i, p in enumerate(probabilities):
		if i != total_test_points:
			for j in range(NO_TIME_POINTS):
				pred_tot.append(p)
	
	# create prediction file
	print('Creating submission(prediction) file...')
	tip = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
	submission_file = 'res/test_conv_net_push'+tip+'.csv'
	# create pandas object
	submission = pd.DataFrame(index=ids_tot[:len(pred_tot)], columns=cols, data=pred_tot)
	# write file
	submission.to_csv(submission_file, index_label='id', float_format='%.6f')
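Because the trained net is pickled to net/net&lt;timestamp&gt;.pickle above, it can later be restored and reused for prediction without retraining. A short sketch; the filename below is a placeholder for whichever pickle was actually written:

import pickle

# Restore a previously trained NeuralNet and reuse it on new windowed data.
with open('net/net2016-01-01-00-00-00.pickle', 'rb') as f:   # placeholder filename
    net = pickle.load(f)

probabilities = net.predict_proba(X_test)   # X_test shaped (n_windows, channels, NO_TIME_POINTS)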
Example #6
def build_dbn():
	cols = ['Start']

	t = 1
	channels = 14
	batch_size = None  # None = arbitrary batch size
	hidden_layer_size = 100  #change to 1024
	N_EVENTS = 1
	max_epochs = 100
	NO_TIME_POINTS = 70 #311


	ids_tot = []
	pred_tot = []
	test_dict = dict()

	test_total = 0


	X, y = load_dataset()
	X_test, ids_tot, test_dict, test_total = load_testdata()
	

	net = NeuralNet(
		layers=[
			('input', layers.InputLayer),
			('dropout1', layers.DropoutLayer),
			('conv1', layers.Conv1DLayer),
			('conv2', layers.Conv1DLayer),
			('pool1', layers.MaxPool1DLayer),
			('dropout2', layers.DropoutLayer),
			('hidden4', layers.DenseLayer),
			('dropout3', layers.DropoutLayer),
			('hidden5', layers.DenseLayer),
			('dropout4', layers.DropoutLayer),
			('output', layers.DenseLayer),
		],
		input_shape=(None, channels, NO_TIME_POINTS),
		dropout1_p=0.5,
		conv1_num_filters=4, conv1_filter_size=1,
		conv2_num_filters=8, conv2_filter_size=4, pool1_pool_size=4,
		dropout2_p=0.5, hidden4_num_units=hidden_layer_size,
		dropout3_p=0.5, hidden5_num_units=hidden_layer_size,
		dropout4_p=0.5, output_num_units=N_EVENTS, output_nonlinearity=sigmoid,

		batch_iterator_train = BatchIterator(batch_size=1000),
		batch_iterator_test = BatchIterator(batch_size=1000),

		y_tensor_type=theano.tensor.matrix,
		update=nesterov_momentum,
		update_learning_rate=theano.shared(float(0.03)),
		update_momentum=theano.shared(float(0.9)),

		objective_loss_function=loss,
		regression=True,

		max_epochs=max_epochs,
		verbose=1,
	)

	
	###process training data####
	X = data_preprocess(X)
	total_time_points = len(X) // NO_TIME_POINTS

	no_rows = total_time_points * NO_TIME_POINTS
	print(X.shape)
	print(total_time_points)
	print(no_rows)

	X = X[0:no_rows, :]

	print(X.shape)

	X = X.transpose()
	X_Samples = np.split(X, total_time_points, axis=1)
	X = np.asarray(X_Samples)
	print(X.shape)


	y = y[0:no_rows, :]
	y = y[::NO_TIME_POINTS, :]





	print("Training trial %d...." %(t))
	net.fit(X,y)

	####process test data####
	print("Creating prediction file ... ")

	X_test = data_preprocess(X_test)
	total_test_time_points = len(X_test) // NO_TIME_POINTS
	remainder_test_points = len(X_test) % NO_TIME_POINTS

	no_rows = total_test_time_points * NO_TIME_POINTS
	X_test = X_test[0:no_rows, :]

	X_test = X_test.transpose()
	X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
	X_test = np.asarray(X_test_Samples)
	
###########################################################################
#######get predictions and write to files for series 9 and series 10#######
	print("Testing subject 0....")
	# reload the learned parameter values into the net before predicting
	params = net.get_all_params_values()
	net.load_params_from(params)
	probabilities = net.predict_proba(X_test)

	total_time_points = []
	all_remainder_data = []
	subs = []
	total_test_points = 0

	trials = np.array(['01','02','03','04','05','06','07','08','09','10'])

	for trial in trials:    	
		sub = 'subj{0}_series{1}'.format('0', trial)
		data_len = test_dict[sub]
		total_time_point = data_len // NO_TIME_POINTS
		remainder_data = data_len % NO_TIME_POINTS

		subs.append(sub)
		total_time_points.append(total_time_point)
		all_remainder_data.append(remainder_data)

	total_test_points = np.sum(total_time_points)


	print(len(ids_tot))
	print(cols)

	print(len(probabilities))

	for i, p in enumerate(probabilities):
		for j in range(NO_TIME_POINTS):
			pred_tot.append(p)


	print(len(pred_tot))
	
	# for k in range(np.sum(all_remainder_data)):
	# 	pred_tot.append(pred_tot[-1])

	#submission file
	print('Creating submission(prediction) file...')

	submission_file = './test_conv_net_push.csv'
	# create pandas object for submission

	submission = pd.DataFrame(index=ids_tot[:len(pred_tot)],
	                           columns=cols,
	                           data=pred_tot)
	# write file
	submission.to_csv(submission_file, index_label='id', float_format='%.6f')