Example #1
 def test_dot_mat_vect(self):
     self.mat.shape = (5, 4, 2, 3)
     self.mat.cols = (0, 2, 3)
     self.mat.axes = ('freq', 'mode', 'a', 'b')
     prod = algebra.dot(self.mat, self.vect)
     self.assertEqual(prod.mat_shape(), (20, ))
     self.assertEqual(prod.shape, (5, 4))
     self.assertEqual(prod.info['axes'], ('freq', 'mode'))
     self.assertTrue(
         sp.allclose(prod.flatten(),
                     sp.dot(self.mat.expand(), self.vect.flatten())))
     # Make sure it checks that the inner axis names match.
     self.mat.axes = ('freq', 'mode', 'c', 'b')
     algebra.dot(self.mat, self.vect, check_inner_axes=False)
     self.assertRaises(ce.DataError, algebra.dot, self.mat, self.vect)
     # Make sure it checks that the inner axis lengths match.
     self.mat.shape = (5, 4, 6)
     self.mat.cols = (0, 2)
     self.mat.axes = ('freq', 'mode', 'a')
     algebra.dot(self.mat, self.vect, check_inner_axes=False)
     self.assertRaises(ce.DataError, algebra.dot, self.mat, self.vect)
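
For reference, the product this test checks treats 'freq' as both a row and a column axis, so expand() lays the frequency blocks out on a diagonal. A minimal NumPy sketch of the same contraction, assuming the self.vect fixture has shape (5, 2, 3) with axes ('freq', 'a', 'b') (implied by the expected shapes, but not shown here):

import numpy as np

# Hypothetical stand-ins for the self.mat / self.vect fixtures.
mat = np.random.rand(5, 4, 2, 3)   # axes ('freq', 'mode', 'a', 'b')
vect = np.random.rand(5, 2, 3)     # axes ('freq', 'a', 'b'), assumed

# 'freq' is shared between both operands; 'a' and 'b' are summed out.
# This matches sp.dot(mat.expand(), vect.flatten()) reshaped to (5, 4).
prod = np.einsum('fmab,fab->fm', mat, vect)
assert prod.shape == (5, 4)
assert prod.flatten().shape == (20,)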
Example #2
def luna(light=(0, 0, 1), bary=(1, 1, 1), vnormals=(), bcolor=(1, 1, 1)):
    # barycentric coordinates
    w, v, u = bary
    # normal vectors, one per vertex
    nA, nB, nC = vnormals
    # note: a fixed light direction overrides the 'light' argument here
    light = (0, 1, -2)
    iA, iB, iC = [algebra.dot(n, light) for n in (nA, nB, nC)]
    # interpolate the light intensity with the barycentric weights
    intensity = w * iA + v * iB + u * iC

    return objetos.glColor(bcolor[2] * intensity, bcolor[1] * intensity,
                           bcolor[0] * intensity)
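
The same shading rule can be sketched without the project's algebra and objetos modules. A self-contained version with a plain dot product and an added clamp to keep the intensity in [0, 1] (the clamp is an assumption, not in the original):

def dot3(a, b):
    # plain dot product for 3-component vectors
    return sum(x * y for x, y in zip(a, b))

def shade(bary, vnormals, light=(0, 1, -2)):
    w, v, u = bary
    nA, nB, nC = vnormals
    # per-vertex intensities, then barycentric interpolation
    iA, iB, iC = [dot3(n, light) for n in (nA, nB, nC)]
    intensity = w * iA + v * iB + u * iC
    return max(0.0, min(1.0, intensity))

# flat normals pointing straight at the light give full intensity
print(shade((0.2, 0.3, 0.5), ((0, 0, 1),) * 3, light=(0, 0, 1)))   # 1.0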
Example #3
def backpropogate(network, input_vector, target):
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # the output * (1 - output) is from the derivative of sigmoid
    output_deltas = [output * (1 - output) * (output - target[i])
                     for i, output in enumerate(outputs)]

    # adjust weights for output layer (network[-1])
    for i, output_neuron in enumerate(network[-1]):
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            output_neuron[j] -= output_deltas[i] * hidden_output

    # propagate the error back through the output-layer weights
    hidden_deltas = [hidden_output * (1 - hidden_output) *
                     algebra.dot(output_deltas, [n[i] for n in network[-1]])
                     for i, hidden_output in enumerate(hidden_outputs)]

    # adjust weights for hidden layer (network[0]); [1] is the bias input
    for i, hidden_neuron in enumerate(network[0]):
        for j, input_value in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input_value
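
This function depends on helpers defined elsewhere in the repo. A plausible sketch of them, assuming a network is a list of layers, each layer a list of neurons, and each neuron a weight list whose last entry pairs with a constant bias input of 1 (names mirror the snippet; this is an assumption, not the repo's actual code):

import math

def dot(v, w):
    # what algebra.dot above presumably computes
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sigmoid(t):
    return 1 / (1 + math.exp(-t))

def feed_forward(network, input_vector):
    """run the input through each layer; returns every layer's outputs"""
    outputs = []
    for layer in network:
        biased_input = input_vector + [1]            # append the bias input
        output = [sigmoid(dot(neuron, biased_input)) for neuron in layer]
        outputs.append(output)
        input_vector = output                        # feed the next layer
    return outputs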
Example #4
def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i
    to the gradient of the direction-w variance"""
    projection_length = algebra.dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
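
direction(w) is defined elsewhere; presumably it rescales w to unit length so only its direction matters. A sketch under that assumption:

import math

def magnitude(v):
    return math.sqrt(sum(v_i ** 2 for v_i in v))

def direction(w):
    # unit vector pointing the same way as w
    mag = magnitude(w)
    return [w_i / mag for w_i in w]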
Example #5
def covariance(x, y):
    """how variables vary in tandem from their means"""
    n = len(x)
    return algebra.dot(de_mean(x), de_mean(y)) / (n - 1)
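
de_mean presumably recenters a list around zero. A sketch with a toy check, using a plain dot product in place of algebra.dot:

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def de_mean(xs):
    # translate xs so its mean is zero
    x_bar = sum(xs) / len(xs)
    return [x - x_bar for x in xs]

# covariance of a variable with itself is its sample variance
xs = [1.0, 2.0, 3.0, 4.0, 5.0]
print(dot(de_mean(xs), de_mean(xs)) / (len(xs) - 1))   # 2.5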
Example #6
def directional_variance_i(x_i, w):
    """the variance of the row x_i in the direction determined by w"""
    return algebra.dot(x_i, direction(w)) ** 2
Example #7
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """i is the index of the data point, j is the index
    of the derivative"""
    return (y_i - logistic(algebra.dot(x_i, beta))) * x_i[j]
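
A quick finite-difference check that this really is the j-th partial derivative of the log-likelihood in Example #8 below, with local stand-ins for logistic and algebra.dot:

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def logistic(t):
    return 1 / (1 + math.exp(-t))

def ll_i(x_i, y_i, beta):
    p = logistic(dot(x_i, beta))
    return math.log(p) if y_i == 1 else math.log(1 - p)

x_i, y_i, beta, j, h = [1.0, 2.0], 1, [0.1, -0.2], 0, 1e-6
beta_h = [b + (h if k == j else 0.0) for k, b in enumerate(beta)]
numeric = (ll_i(x_i, y_i, beta_h) - ll_i(x_i, y_i, beta)) / h
analytic = (y_i - logistic(dot(x_i, beta))) * x_i[j]
print(abs(numeric - analytic) < 1e-4)   # True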
Example #8
def logistic_log_likelihood_i(x_i, y_i, beta):
    if y_i == 1:
        return math.log(logistic(algebra.dot(x_i, beta)))
    else:
        return math.log(1 - logistic(algebra.dot(x_i, beta)))
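
Worth noting: 1 - logistic(t) equals logistic(-t), so the else branch can be computed without the subtraction, which is numerically friendlier for large dot products. A one-line check:

import math

def logistic(t):
    return 1 / (1 + math.exp(-t))

t = 3.7
print(abs((1 - logistic(t)) - logistic(-t)) < 1e-12)   # True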
Example #9
def perceptron_output(weights, bias, x):
    """returns 1 if the perceptron 'fires'; 0 if not"""
    return step_function(algebra.dot(weights, x) + bias)
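
step_function presumably thresholds at zero. Under that assumption a two-input perceptron can model an AND gate (the weights below are illustrative, not from the source):

def step_function(t):
    return 1 if t >= 0 else 0

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

# fires only when both inputs are 1: 2 + 2 - 3 >= 0
weights, bias = [2, 2], -3
for x in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print(x, step_function(dot(weights, x) + bias))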
Example #10
def neuron_output(weights, inputs):
    return sigmoid(algebra.dot(weights, inputs))
Example #11
def predict(x_i, beta):
    """assumes that the first element of each x_i is 1"""
    return dot(x_i, beta)
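
Because of the leading-1 convention in the docstring, beta[0] acts as the intercept. A toy usage with a plain dot stand-in (the numbers are illustrative):

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

beta = [50.0, 0.2, 10.0]   # intercept, then one coefficient per feature
x_i = [1, 1000, 3]         # leading 1 multiplies the intercept
print(dot(x_i, beta))      # 50 + 200 + 30 = 280.0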
Example #12
def ridge_penalty(beta, alpha):
    """alpha is a hyper-parameter controlling how harsh the penalty
    is; sometimes called 'lambda' but that is already a Python keyword"""
    return alpha * algebra.dot(beta[1:], beta[1:])
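
The beta[1:] slice means the constant term is never penalized, only the feature coefficients. A quick check with a plain dot stand-in:

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

beta = [100.0, 1.0, 2.0]   # large intercept, small coefficients
# with alpha = 1.0 the penalty is 1**2 + 2**2; the intercept is ignored
print(1.0 * dot(beta[1:], beta[1:]))   # 5.0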
Example #13
from typing import List

def covariance(xs: List[float], ys: List[float]) -> float:
    assert len(xs) == len(ys)
    return dot(de_mean(xs), de_mean(ys)) / (len(xs) - 1)