def train(net, uris, epochs, learning_rate, validation_percentage, save_file = False):
	"""
	Train *net* in place on pitch data parsed from *uris* and report accuracy.

	@param net 						network to be trained (modified in place)
	@param uris						uris that the data is parsed from
	@param epochs					maximum number of iterations
	@param learning_rate			learning rate
	@param validation_percentage	fraction of the data held out as the test set
	@param save_file				optional filepath to save weights and biases

	@postcondition: 				trains network with accurate weights and biases
									(network not returned, object is just modified);
									module-level `network`, `mean`, `std` are updated
	@return 						fraction of correct test/verification examples
	"""
	# BUGFIX: the original declared `global net`, which is a SyntaxError because
	# `net` is also a parameter. Only the names assigned here for reuse by other
	# module code (`network`, plus the normalization stats) need to be global.
	global network
	global mean
	global std

	pitches = parser.parse(uris, stand)

	# float dtype so the in-place -= / /= normalization below cannot raise a
	# casting error when the parsed pitch features happen to be integers
	inputs = np.array([ [pitch[0], pitch[1]] for pitch in pitches ], dtype=float)
	outputs = np.array([ [pitch[2]] for pitch in pitches ])

	mean = np.mean(inputs, axis=0)
	std = np.std(inputs, axis=0)

	inputs -= mean												# zeroing data
	inputs /= std 												# normalizing data

	# (input, target) pairs — the data structure neuralpy expects
	training_set = [ (i, o) for i, o in zip(inputs, outputs) ]
	random.shuffle(training_set)								# randomize so we don't train on the same order every run
	neuralpy.output("len: " + str(len(training_set)))

	cutoff = int(validation_percentage * len(training_set))		# determine the cutoff index

	test_set = training_set[:cutoff]							# held-out fraction used as the test set
	training_set = training_set[cutoff:]						# remainder used for training

	batch_length = int(.6 * len(training_set))

	net.train(training_set, epochs, learning_rate, batch_length=batch_length, monitor_cost=True)

	count = test(net, test_set)									# number of correct examples in test_set

	# if there is a save file specified, save the weights
	# and biases to the file in json format.
	if save_file:
		save(save_file, net)

	network = net 												# setting the (now declared) global variable for reuse
	# guard: validation_percentage == 0 yields an empty test set
	return float(count) / len(test_set) if test_set else 0.0
# ---- Example #2 (separator line from the scraped source; stray "0" rating removed) ----
import neuralpy

# 2-node input layer, hidden layers of 10 and 8 neurons, 1-node output
net = neuralpy.Network(2, 10, 8, 1)

# probe the untrained network on all four boolean input patterns
for pattern in ([1, 1], [0, 1], [1, 0], [0, 0]):
	neuralpy.output(net.feedforward(pattern))


# XOR truth table as (input, target) training pairs
training_data = [
	([1, 1], [0]),
	([1, 0], [1]),
	([0, 1], [1]),
	([0, 0], [0]),
]

epochs = 300

learning_rate = 1

net.train(training_data, epochs, learning_rate, monitor_cost=True)

neuralpy.output()

# after training, the responses should approximate the XOR table
for pattern in ([1, 1], [0, 1], [1, 0], [0, 0]):
	neuralpy.output(net.feedforward(pattern))

net.show_costs()
# ---- Example #3 (separator line from the scraped source; stray "0" rating removed) ----
import neuralpy

# same topology as the previous example: 2 inputs, 10- and 8-neuron hidden
# layers, a single output neuron
net = neuralpy.Network(2, 10, 8, 1)

# show the untrained outputs for every row of the truth table
probes = ([1, 1], [0, 1], [1, 0], [0, 0])
for probe in probes:
	neuralpy.output(net.feedforward(probe))

# XOR training pairs: inputs with their one-element target vectors
training_data = [
	([1, 1], [0]),
	([1, 0], [1]),
	([0, 1], [1]),
	([0, 0], [0]),
]

epochs = 300

# a larger learning rate than example #2 to compare convergence
learning_rate = 3

net.train(training_data, epochs, learning_rate, monitor_cost=True)

neuralpy.output()

# re-probe the trained network on the same patterns
for probe in probes:
	neuralpy.output(net.feedforward(probe))

net.show_costs()
# ---- Example #4 (separator line from the scraped source; stray "0" rating removed) ----
# Tail of an OR-gate truth table (header rows lost upstream):
# 	|	F 	|	F 	|	  F 	|
# 	|_______|_______|___________|
#
# Throughout this example, 1 encodes True and 0 encodes False.
import neuralpy

# a minimal network: 2-node input layer, one 3-neuron hidden layer,
# and a single output neuron
net = neuralpy.Network(2, 3, 1)

# feed an arbitrary probe through the untrained network and show the result
sample = [1, 1]
neuralpy.output(net.feedforward(sample))

# training pairs mirroring the truth table in the header of this file
training_data = [
	([1, 1], [1]),
	([1, 0], [1]),
	([0, 1], [1]),
	([0, 0], [0]),
]

# remaining hyperparameters for the training run
learning_rate = 1
epochs = 100
# ---- Example #5 (separator line from the scraped source; stray "0" rating removed) ----
# Last rows of an OR-gate truth table (the top was cut off upstream):
# 	|	F 	|	F 	|	  F 	|
# 	|_______|_______|___________|
#
# Convention: 1 stands for True, 0 stands for False.
import neuralpy

# network layout: two input nodes, a lone 3-neuron hidden layer, one output
net = neuralpy.Network(2, 3, 1)

# sanity-check the untrained network on one arbitrary input vector
probe = [1, 1]
result = net.feedforward(probe)
neuralpy.output(result)

# (input, target) pairs reflecting the truth table above
pairs = []
pairs.append(([1, 1], [1]))
pairs.append(([1, 0], [1]))
pairs.append(([0, 1], [1]))
pairs.append(([0, 0], [0]))
training_data = pairs

# hyperparameters and epoch count for the training pass
learning_rate = 1
epochs = 100
# ---- Example #6 (separator line from the scraped source; stray "0" rating removed) ----
import classifier
import neuralpy
import grapher

# network for the pitch classifier: 2 inputs, 8 hidden neurons, 1 output
net = neuralpy.Network(2, 8, 1)

# data files miller_xml/1.xml through miller_xml/12.xml
uris = [ "miller_xml/%d.xml" % n for n in range(1, 13) ]

epochs = 200
learning_rate = 0.05

validation_percentage = .32

ps = []

# batter stance fed to the parser inside classifier.train
classifier.stand = "L"

# ten independent runs from fresh random weights, saving each run's
# weights/biases and collecting its test accuracy
for run in range(0, 10):
	net.randomize_parameters()
	accuracy = classifier.train(net, uris, epochs, learning_rate, validation_percentage, save_file='results/miller_' + str(run) + '.txt')
	neuralpy.output(accuracy)
	ps.append(accuracy)


# report which run scored best and graph its saved cost curve
best = ps.index(max(ps))
neuralpy.output("\n\n" + str(max(ps)) + " at " + str(best))

grapher.graph(filepath='results/miller_' + str(best) + '.txt')