def backpropagate(network, input_vector, targets):

    hidden_outputs, outputs = feed_forward(network, input_vector)

    # output * (1 - output) is the derivative of the sigmoid
    output_deltas = [output * (1 - output) * (output - target)
                     for output, target in zip(outputs, targets)]

    # adjust output-layer weights, gradient descent style
    for i, output_neuron in enumerate(network[-1]):
        # the jth weight changes by this neuron's delta times its jth input
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            output_neuron[j] -= output_deltas[i] * hidden_output

    # now propagate the errors backward to the hidden layer
    hidden_deltas = [hidden_output * (1 - hidden_output) *
                     dot(output_deltas, [n[i] for n in network[-1]])
                     for i, hidden_output in enumerate(hidden_outputs)]

    # adjust hidden-layer weights
    for i, hidden_neuron in enumerate(network[0]):
        for j, input in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input
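Both backpropagate examples call helpers (dot, sigmoid, neuron_output, feed_forward) that live elsewhere in their code bases and are not shown on this page. A minimal sketch of what they presumably look like; with a two-layer network (one hidden layer plus one output layer) this feed_forward returns a list that unpacks as hidden_outputs, outputs, which is what both examples expect.

import math

def dot(v, w):
    """the sum of the elementwise products of v and w"""
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sigmoid(t):
    return 1 / (1 + math.exp(-t))

def neuron_output(weights, inputs):
    return sigmoid(dot(weights, inputs))

def feed_forward(neural_network, input_vector):
    """run the input through the network and return the outputs of every layer"""
    outputs = []
    for layer in neural_network:
        input_with_bias = input_vector + [1]              # add a constant bias input
        output = [neuron_output(neuron, input_with_bias)  # one output per neuron
                  for neuron in layer]
        outputs.append(output)
        input_vector = output                             # this layer feeds the next
    return outputs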
Example #2
def backpropagate(network, input_vector, targets):
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # the output * (1 - output) is from the derivative of sigmoid
    output_deltas = [output * (1 - output) * (output - target)
                     for output, target in zip(outputs, targets)]

    # adjust weights for output layer, one neuron at a time
    for i, output_neuron in enumerate(network[-1]):
        # focus on the ith output layer neuron
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            # adjust the jth weight based on both
            # this neuron's delta and its jth input
            output_neuron[j] -= output_deltas[i] * hidden_output

    # back-propagate errors to hidden layer
    hidden_deltas = [hidden_output * (1 - hidden_output) *
                     lin_alg.dot(output_deltas, [n[i] for n in network[-1]])
                     for i, hidden_output in enumerate(hidden_outputs)]

    # adjust weights for hidden layer, one neuron at a time
    for i, hidden_neuron in enumerate(network[0]):
        for j, input in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input
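A hypothetical training sketch using the helpers sketched after Example #1; the XOR data, network shape, and iteration count are illustrative choices, not taken from the original source. Each call to backpropagate performs one gradient-descent update (with an implicit step size of 1) for a single training pair.

import random

random.seed(0)
input_size, num_hidden, output_size = 2, 2, 1

# each neuron is a list of weights: one per input plus a trailing bias weight
hidden_layer = [[random.random() for _ in range(input_size + 1)]
                for _ in range(num_hidden)]
output_layer = [[random.random() for _ in range(num_hidden + 1)]
                for _ in range(output_size)]
network = [hidden_layer, output_layer]

xor_inputs  = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_targets = [[0], [1], [1], [0]]

for _ in range(10000):                      # many passes over the tiny data set
    for input_vector, target in zip(xor_inputs, xor_targets):
        backpropagate(network, input_vector, target)

for input_vector in xor_inputs:
    print(input_vector, feed_forward(network, input_vector)[-1])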
Example #3
def cosine_similarity(v, w):
    """the cosine of the angle between v and w"""
    return lin_alg.dot(v, w) / math.sqrt(lin_alg.dot(v, v) * lin_alg.dot(w, w))
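A self-contained usage sketch; a plain dot stands in for the lin_alg.dot helper so the snippet runs on its own.

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def cosine_similarity(v, w):
    return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))

print(cosine_similarity([1, 0], [0, 1]))   # 0.0  (orthogonal vectors)
print(cosine_similarity([1, 2], [2, 4]))   # 1.0  (parallel vectors)
print(cosine_similarity([1, 0], [-1, 0]))  # -1.0 (opposite directions)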
Example #4
def perceptron_output(weights, bias, x):
	'''returns 1 if the perceptron fires, 0 if not'''
	calculation = lin_alg.dot(weights, x) + bias
	return step_function(calculation)
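perceptron_output depends on a step_function helper and the lin_alg.dot helper, neither of which appears on this page. A minimal self-contained sketch, with an AND-gate weighting chosen purely for illustration:

def step_function(x):
    return 1 if x >= 0 else 0

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def perceptron_output(weights, bias, x):
    """returns 1 if the perceptron fires, 0 if not"""
    return step_function(dot(weights, x) + bias)

# an AND gate: it fires only when both inputs are 1
and_weights, and_bias = [2, 2], -3
print(perceptron_output(and_weights, and_bias, [1, 1]))  # 1
print(perceptron_output(and_weights, and_bias, [0, 1]))  # 0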
Example #5
def neuron_output(weights, inputs):
	return sigmoid(lin_alg.dot(weights, inputs))
Example #6
def perceptron_output(weights, bias, x):
    """returns 1 if perceptron fires, 0 if not"""
    calculation = dot(weights, x) + bias
    return step_function(calculation)
Example #7
def neuron_output(weights, inputs):
    return sigmoid(dot(weights, inputs))
Example #8
def transform_vector(v, components):
    """project v onto each of the components (e.g. the principal components found by PCA)"""
    return [lin_alg.dot(v, w) for w in components]
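A quick illustration with hand-picked components (two standard basis vectors, so the transform simply picks out coordinates); a plain dot stands in for lin_alg.dot:

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def transform_vector(v, components):
    return [dot(v, w) for w in components]

basis = [[1, 0, 0], [0, 1, 0]]             # keep only the first two coordinates
print(transform_vector([3, 4, 5], basis))  # [3, 4]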
Example #9
def project(v, w):
    """the projection of v onto w (w is assumed to be a unit-length direction vector)"""
    projection_length = lin_alg.dot(v, w)
    return lin_alg.scalar_multiply(projection_length, w)
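A self-contained sketch with plain stand-ins assumed for the lin_alg helpers:

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def scalar_multiply(c, v):
    return [c * v_i for v_i in v]

def project(v, w):
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)

print(project([3, 4], [1, 0]))  # [3, 0], the component of [3, 4] along the x-axis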
Example #10
def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i to the gradient of the variance in the direction determined by w"""
    projection_length = lin_alg.dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
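Both this example and the next call a direction helper that is not shown on this page. A minimal sketch of the presumed definition (rescale w to unit length), again with a plain dot standing in for lin_alg.dot:

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def magnitude(v):
    return math.sqrt(dot(v, v))

def direction(w):
    """rescale w so that it has magnitude 1"""
    mag = magnitude(w)
    return [w_i / mag for w_i in w]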
Example #11
def directional_variance_i(x_i, w):
    """the variance of row x_i in the direction determined by w"""
    return lin_alg.dot(x_i, direction(w)) ** 2
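A hypothetical usage: summing the per-row contributions over a tiny data set gives the total variance in a given direction. The directional_variance aggregator is a name assumed here for illustration, and plain dot/direction stand-ins keep the snippet self-contained.

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def direction(w):
    mag = math.sqrt(dot(w, w))
    return [w_i / mag for w_i in w]

def directional_variance_i(x_i, w):
    return dot(x_i, direction(w)) ** 2

def directional_variance(X, w):
    # total variance of the data set X in the direction determined by w
    return sum(directional_variance_i(x_i, w) for x_i in X)

X = [[1, 0], [2, 0], [3, 0]]            # all of the spread lies along the x-axis
print(directional_variance(X, [1, 0]))  # 14.0 = 1 + 4 + 9
print(directional_variance(X, [0, 1]))  # 0.0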