Example #1
def test_dot_mat_vect(self):
    self.mat.shape = (5, 4, 2, 3)
    self.mat.cols = (0, 2, 3)
    self.mat.axes = ('freq', 'mode', 'a', 'b')
    prod = algebra.dot(self.mat, self.vect)
    self.assertEqual(prod.mat_shape(), (20,))
    self.assertEqual(prod.shape, (5, 4))
    self.assertEqual(prod.info['axes'], ('freq', 'mode'))
    self.assertTrue(
        sp.allclose(prod.flatten(),
                    sp.dot(self.mat.expand(), self.vect.flatten())))
    # Make sure it checks that the inner axis names match.
    self.mat.axes = ('freq', 'mode', 'c', 'b')
    algebra.dot(self.mat, self.vect, check_inner_axes=False)
    self.assertRaises(ce.DataError, algebra.dot, self.mat, self.vect)
    # Make sure it checks that the inner axis lengths match.
    self.mat.shape = (5, 4, 6)
    self.mat.cols = (0, 2)
    self.mat.axes = ('freq', 'mode', 'a')
    algebra.dot(self.mat, self.vect, check_inner_axes=False)
    self.assertRaises(ce.DataError, algebra.dot, self.mat, self.vect)
Example #2
def luna(light=(0, 0, 1), bary=(1, 1, 1), vnormals=(), bcolor=(1, 1, 1)):
    # barycentric coordinates
    w, v, u = bary
    # normal vectors at the three vertices
    nA, nB, nC = vnormals
    # NOTE: this hard-coded light direction overrides the `light` parameter
    light = (0, 1, -2)
    iA, iB, iC = [algebra.dot(n, light) for n in (nA, nB, nC)]
    # interpolate the light intensity across the triangle
    intensity = w * iA + v * iB + u * iC

    return objetos.glColor(bcolor[2] * intensity, bcolor[1] * intensity,
                           bcolor[0] * intensity)
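
Every example on this page calls algebra.dot. The algebra module itself isn't shown; for the plain vector-times-vector cases a minimal sketch would look like the following (the matrix-aware version exercised in Example #1 clearly does more, e.g. axis-name checking):

def dot(v, w):
    """v_1 * w_1 + ... + v_n * w_n"""
    assert len(v) == len(w), "vectors must be the same length"
    return sum(v_i * w_i for v_i, w_i in zip(v, w))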
Example #3
def backpropagate(network, input_vector, target):
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # the output * (1 - output) is from the derivative of sigmoid
    output_deltas = [output * (1 - output) * (output - target[i])
                     for i, output in enumerate(outputs)]

    # adjust weights for output layer (network[-1])
    for i, output_neuron in enumerate(network[-1]):
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            output_neuron[j] -= output_deltas[i] * hidden_output

    # propagate the deltas back through the output layer's weights
    hidden_deltas = [hidden_output * (1 - hidden_output) *
                     algebra.dot(output_deltas, [n[i] for n in network[-1]])
                     for i, hidden_output in enumerate(hidden_outputs)]

    # adjust weights for hidden layer (network[0]); [1] appends the bias input
    for i, hidden_neuron in enumerate(network[0]):
        for j, input_value in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input_value
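
backpropagate assumes a feed_forward helper that returns each layer's outputs. A minimal sketch in the same from-scratch style, reusing the sigmoid neuron from Example #10 below (the source project's actual implementation may differ):

import math

def sigmoid(t):
    return 1 / (1 + math.exp(-t))

def neuron_output(weights, inputs):
    return sigmoid(algebra.dot(weights, inputs))

def feed_forward(network, input_vector):
    """push input_vector through each layer and collect every layer's output"""
    outputs = []
    for layer in network:
        input_with_bias = input_vector + [1]   # append the bias input
        output = [neuron_output(neuron, input_with_bias) for neuron in layer]
        outputs.append(output)
        input_vector = output                  # this layer's output feeds the next
    return outputs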
Example #4
def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i
    to the gradient of the direction-w variance"""
    projection_length = algebra.dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
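
directional_variance_gradient_i (and directional_variance_i in Example #6) assumes a direction helper that rescales w to unit length so only its direction matters. A plausible sketch:

import math

def magnitude(v):
    return math.sqrt(algebra.dot(v, v))

def direction(w):
    mag = magnitude(w)
    return [w_i / mag for w_i in w]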
Example #5
def covariance(x, y):
    """how variables vary in tandem from their means"""
    n = len(x)
    return algebra.dot(de_mean(x), de_mean(y)) / (n - 1)
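
covariance relies on a de_mean helper that recenters a list around its mean; a minimal sketch:

def de_mean(xs):
    """translate xs so that its mean is 0"""
    x_bar = sum(xs) / len(xs)
    return [x - x_bar for x in xs]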
Example #6
def directional_variance_i(x_i, w):
    """the variance of the row x_i in the direction determined by w"""
    return algebra.dot(x_i, direction(w)) ** 2
Example #7
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """i is the index of the data point, j is the index
    of the derivative"""
    return (y_i - logistic(algebra.dot(x_i, beta))) * x_i[j]
Example #8
def logistic_log_likelihood_i(x_i, y_i, beta):
    if y_i == 1:
        return math.log(logistic(algebra.dot(x_i, beta)))
    else:
        return math.log(1 - logistic(algebra.dot(x_i, beta)))
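
Both logistic-regression helpers assume the standard logistic (sigmoid) function:

import math

def logistic(x):
    return 1.0 / (1 + math.exp(-x))

Note that logistic_log_likelihood_i takes math.log of a probability, so it will raise a ValueError if logistic rounds to exactly 0.0 or 1.0 in floating point.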
Example #9
def perceptron_output(weights, bias, x):
    """returns 1 if the perceptron 'fires'; 0 if not"""
    return step_function(algebra.dot(weights, x) + bias)
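
step_function is the usual 0/1 threshold. With it, the perceptron above can represent simple logic gates; a sketch plus an AND gate as a usage check (the weights here are illustrative, not from the source):

def step_function(x):
    return 1 if x >= 0 else 0

# AND gate: fires only when both inputs are 1
and_weights, and_bias = [2.0, 2.0], -3.0
assert perceptron_output(and_weights, and_bias, [1, 1]) == 1
assert perceptron_output(and_weights, and_bias, [0, 1]) == 0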
Example #10
def neuron_output(weights, inputs):
    return sigmoid(algebra.dot(weights, inputs))
Example #11
def predict(x_i, beta):
    """assumes that the first element of each x_i is 1"""
    return dot(x_i, beta)
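
Since the first element of x_i is the constant 1, beta[0] plays the role of the intercept. A hypothetical usage (the coefficients are made up for illustration):

beta = [4.0, 2.0]   # hypothetical intercept and slope
x_i = [1, 3]        # leading 1 multiplies the intercept
assert predict(x_i, beta) == 4.0 + 2.0 * 3  # == 10.0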
Example #12
def ridge_penalty(beta, alpha):
    """alpha is a hyper-parameter controlling how harsh the penalty
    is; sometimes called 'lambda' but that is already a Python keyword"""
    return alpha * algebra.dot(beta[1:], beta[1:])
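
beta[1:] deliberately skips beta[0], so the intercept is never penalized. In use the penalty is added to the squared prediction error; a sketch, assuming the predict helper from Example #11:

def error(x_i, y_i, beta):
    return y_i - predict(x_i, beta)

def squared_error_ridge(x_i, y_i, beta, alpha):
    """squared prediction error plus the ridge penalty"""
    return error(x_i, y_i, beta) ** 2 + ridge_penalty(beta, alpha)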
Example #13
from typing import List

def covariance(xs: List[float], ys: List[float]) -> float:
    assert len(xs) == len(ys), "xs and ys must have the same number of elements"
    return dot(de_mean(xs), de_mean(ys)) / (len(xs) - 1)
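
With the de_mean and dot sketches above, covariance([1, 2, 3], [10, 20, 30]) works out to dot([-1, 0, 1], [-10, 0, 10]) / 2 = 20 / 2 = 10.0.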