Example 1
import datetime
import math
import numpy as np

#t_weights: assumed 2-2-1 layout mirroring the weight structure in Example 2
#(row 0 of each matrix carries the bias node's outgoing weights)
t_weights = [
		[
			[0.25, 0.25],
			[0.25, 0.25],
			[0.25, 0.25]
		],
		[
			[0.25],
			[0.25],
			[0.25]
		]
]

print datetime.datetime.now()
tprime = lambda x: 1 - x**2  #derivative of tanh expressed in terms of its output: tanh'(s) = 1 - tanh(s)**2
faux_data = [((1,1),1)]
test_ann = ANN(math.tanh, tprime, t_weights, False)
test_ann.populate(faux_data)
print "Numerical Gradient, tanh", test_ann.num_grad()
print "Actual Gradient, tanh", test_ann.calc_err_grad()[0]

test_ann.set_ident(True)  #recheck the gradients with the identity-activation ("ident") mode enabled
print "Numerical Gradient, ident", test_ann.num_grad()
print "Actual Gradient, ident", test_ann.calc_err_grad()[0]
#Problem 11.2 NN
print datetime.datetime.now()
#Small random initial weights: 2 inputs + bias -> 10 hidden, 10 hidden + bias -> 1 output
weights = [np.random.rand(3, 10) / 100, np.random.rand(11, 1) / 100]
#A test:
ann = ANN(math.tanh, tprime, weights, True)
ann.populate(sampled_data)  #sampled_data comes from earlier in the problem script (not shown in this excerpt)
#Part a
max_itr = 1000
print "Training ANN with no lambda for", max_itr,"iterations"
print "Actual grad, no lambda", ann.calc_err_grad()[0]
print "Numerical grad, no lambda", ann.num_grad()

Eins_a, itr_a = ann.train(max_itr)
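
For reference, here is a plausible sketch of the kind of loop ann.train(max_itr) runs, returning the final in-sample error and the iteration count as unpacked above. This is a guess at the shape of the method, not its actual implementation; the learning rate eta and the stopping tolerance tol are hypothetical parameters.

import numpy as np

def train_sketch(err, grad, w, max_itr, eta=0.1, tol=1e-6):
	#err(w) -> scalar in-sample error, grad(w) -> flat gradient vector
	itr = 0
	for itr in range(1, max_itr + 1):
		g = grad(w)
		if np.linalg.norm(g) < tol:  #assumed stopping rule
			break
		w = w - eta * g  #fixed-step gradient descent
	return err(w), itr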
Example 2
from math import tanh

#The architecture of the network is defined entirely by the weights.
#Row i of a weight matrix holds the weights of the edges leaving node i of the
#current layer (node 0 is the bias node) and entering each node of the next
#layer other than its bias node. A forward-pass sketch follows the weights below.
weights = [
		[
			[0.25, 0.25], 
			[0.25, 0.25],
			[0.25, 0.25]
		],
		[
			[0.25],
			[0.25],
			[0.25]
		]
]
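
#A minimal forward-pass sketch (an assumption, not the ANN class's internals)
#showing how the architecture is read off the nested weights above: the bias
#node is re-inserted as node 0 at every layer, and tanh is assumed throughout.
def forward_sketch(x, weights):
	xs = list(x)
	for W in weights:
		xs = [1.0] + xs  #prepend the bias node
		xs = [tanh(sum(xs[i] * W[i][j] for i in range(len(W))))
			  for j in range(len(W[0]))]
	return xs

print "Forward-pass sketch on [1, 1]:", forward_sketch([1, 1], weights)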
data = [([1,1],1)]

tanhprime = lambda x: 1 - x**2  #derivative of tanh in terms of its output

ann = ANN(tanh, tanhprime, weights)
ann.populate(data)
print "Numerical gradient", ann.num_grad()
print "Actual gradient", ann.calc_err_grad()[0]
ann.diagnostic()
ann.set_ident(True)
print "Numerical Gradient, ident", ann.num_grad()
print "Actual Gradient, ident", ann.calc_err_grad()[0]
ann.diagnostic()
ann.set_ident(False)
ann.set_lamb(0.01 / 300)  #weight-decay lambda = 0.01/N with N = 300
print "Numerical Gradient, lambda = 0.01/N", ann.num_grad()
print "Actual Gradient, lambda = 0.01/N", ann.calc_err_grad()[0]