def forward(self, input: Tensor) -> Tensor:
    self.input = input              # Save both input and previous
    self.prev_hidden = self.hidden  # hidden state to use in backprop.

    a = [dot(self.w[h], input) + dot(self.u[h], self.hidden) + self.b[h]
         for h in range(self.hidden_dim)]

    self.hidden = tensor_apply(tanh, a)  # Apply tanh activation
    return self.hidden                   # and return the result.
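For reference, here is a minimal standalone sketch of the same recurrent update on plain Python lists. The toy weights, the two-unit sizes, and the local dot below are illustrative stand-ins; the SimpleRnn attributes (w, u, b, hidden) and tensor_apply used above come from the surrounding library.

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

# Hypothetical toy parameters: input_dim = 2, hidden_dim = 2
w = [[0.1, 0.2], [0.3, 0.4]]   # input-to-hidden weights, one row per hidden unit
u = [[0.0, 0.1], [0.1, 0.0]]   # hidden-to-hidden weights
b = [0.0, 0.0]                 # biases
hidden = [0.0, 0.0]            # previous hidden state

x = [1.0, -1.0]                # one input vector
hidden = [math.tanh(dot(w[h], x) + dot(u[h], hidden) + b[h])
          for h in range(2)]   # the same update the forward pass above performs
print(hidden)                  # new hidden state, also the layer's output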
def sqerror_gradients(network: List[List[Vector]], input_vector: Vector,
                      target_vector: Vector) -> List[List[Vector]]:
    """Given a neural network, an input vector and a target vector,
    makes a prediction and computes the gradient of squared error loss
    with respect to the neuron weights."""

    # forward pass
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # gradients with respect to output neuron pre-activation outputs
    output_deltas = [
        output * (1 - output) * (output - target)
        for output, target in zip(outputs, target_vector)
    ]

    # gradients with respect to output neuron weights
    output_grads = [[
        output_deltas[i] * hidden_output
        for hidden_output in hidden_outputs + [1]
    ] for i, output_neuron in enumerate(network[-1])]

    # gradients with respect to hidden neuron pre-activation outputs
    hidden_deltas = [
        hidden_output * (1 - hidden_output) *
        dot(output_deltas, [n[i] for n in network[-1]])
        for i, hidden_output in enumerate(hidden_outputs)
    ]

    # gradients with respect to hidden neuron weights
    hidden_grads = [[hidden_deltas[i] * input for input in input_vector + [1]]
                    for i, hidden_neuron in enumerate(network[0])]

    return [hidden_grads, output_grads]
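As a usage sketch (not from the original source), the gradients can be exercised on a tiny 2-2-1 network. feed_forward is assumed here to be the usual sigmoid forward pass with a trailing bias weight per neuron, and is reimplemented below so the snippet runs on its own.

import math
import random

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sigmoid(t):
    return 1 / (1 + math.exp(-t))

def feed_forward(network, input_vector):
    """Assumed behaviour: returns (hidden_outputs, outputs) for a sigmoid net."""
    hidden_outputs = [sigmoid(dot(neuron, input_vector + [1]))
                      for neuron in network[0]]
    outputs = [sigmoid(dot(neuron, hidden_outputs + [1]))
               for neuron in network[-1]]
    return hidden_outputs, outputs

random.seed(0)
# 2 inputs -> 2 hidden neurons -> 1 output; each neuron carries a bias weight.
network = [[[random.random() for _ in range(3)] for _ in range(2)],
           [[random.random() for _ in range(3)]]]

hidden_grads, output_grads = sqerror_gradients(network, [1.0, 0.0], [1.0])
print(len(hidden_grads), len(output_grads))   # 2 and 1: one gradient per neuron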
Example #3
    def forward(self, input: Tensor) -> Tensor:
        # Save the input to use in the backward pass.
        self.input = input

        # Return the vector of neuron outputs.
        return [dot(input, self.w[o]) + self.b[o]
                for o in range(self.output_dim)]
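The same computation as a quick self-contained sketch: each output is the input dotted with that neuron's weight row, plus a bias. The weights, biases, and input below are made-up toy values, not part of the layer above.

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

w = [[1.0, 2.0, 3.0],           # weights for output neuron 0
     [4.0, 5.0, 6.0]]           # weights for output neuron 1
b = [0.5, -0.5]
x = [1.0, 0.0, -1.0]

print([dot(x, w[o]) + b[o] for o in range(2)])   # [-1.5, -2.5]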
def shader_3(base_color, obj_vertices, barycentric_coord, normal_vectors):
    A, B, C = obj_vertices
    w, v, u = barycentric_coord
    nA, nB, nC = normal_vectors

    x = A.x * w + B.x * v + C.x * u
    y = A.y * w + B.y * v + C.y * u
    z = A.z * w + B.z * v + C.z * u

    nx = nA.x * w + nB.x * v + nC.x * u
    ny = nA.y * w + nB.y * v + nC.y * u
    nz = nA.z * w + nB.z * v + nC.z * u

    vn = V3(nx, ny, nz)

    intensity = dot(vn, V3(0, 1, 1))

    if y < 0.10:
        base_color = (base_color[0], base_color[1] + (abs(y - 0.10) * 2),
                      base_color[2])

    if intensity < 0:
        intensity = 0

    intensity += 0.2

    if intensity > 1:
        intensity = 1

    return (
        base_color[0] * intensity if base_color[0] * intensity <= 1 else 1,
        base_color[1] * intensity if base_color[1] * intensity <= 1 else 1,
        base_color[2] * intensity if base_color[2] * intensity <= 1 else 1,
    )
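shader_3 leans on a V3 vector type and a dot that accepts it; neither is shown on this page, so the sketch below fills in minimal assumed versions plus made-up triangle data to show a call.

from collections import namedtuple

V3 = namedtuple("V3", ["x", "y", "z"])

def dot(a, b):
    """Assumed 3-component dot product for V3 values."""
    return a.x * b.x + a.y * b.y + a.z * b.z

vertices = (V3(0, 0, 0), V3(1, 0, 0), V3(0, 1, 0))
normals = (V3(0, 0, 1), V3(0, 0, 1), V3(0, 0, 1))   # flat, facing +z
bary = (1 / 3, 1 / 3, 1 / 3)                        # center of the triangle

print(shader_3((0.8, 0.2, 0.2), vertices, bary, normals))   # (0.8, 0.2, 0.2)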
def covariance(x, y):
    """
    Whereas variance measures how a single variable deviates from its mean, 
    covariance measures how two variables vary in tandem from their means.
    
    A "large" positive covariance means that x tends to be large when y is large 
    and small when y is small. A "large" negative covariance means the opposite - 
    that x tends to be small when y is large and vice versa. A covariance close to 
    zero means no such relationship exists.
    """
    n = len(x)
    return dot(de_mean(x), de_mean(y)) / (n - 1)
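A quick worked example; de_mean and dot are reimplemented here (mirroring the helpers the function expects) so the lines stand alone.

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def de_mean(xs):
    """Translate xs by subtracting its mean, so the result is centered on 0."""
    mean = sum(xs) / len(xs)
    return [x - mean for x in xs]

x = [1, 2, 3, 4, 5]
y = [10, 20, 30, 40, 50]        # y moves in lockstep with x
print(covariance(x, y))         # 25.0: large and positive
print(covariance(x, y[::-1]))   # -25.0: large and negative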
Example #7
def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i to the gradient of the direction-w variance"""
    projection_length = dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
def neuron_output(weights, inputs):
    return sigmoid(dot(weights, inputs))
def perceptron_output(weights: Vector, bias: float, x: Vector) -> float:
    """Returns 1 if the perceptron fires, else returns 0"""
    calculation = dot(weights, x) + bias
    return step_function(calculation)
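For example, with suitable weights and bias a perceptron computes an AND gate; step_function and dot are filled in below (as an assumed threshold-at-zero step and the usual dot product) so the checks run standalone.

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def step_function(x):
    return 1.0 if x >= 0 else 0.0

and_weights = [2.0, 2.0]
and_bias = -3.0                 # fires only when both inputs are 1

assert perceptron_output(and_weights, and_bias, [1, 1]) == 1
assert perceptron_output(and_weights, and_bias, [1, 0]) == 0
assert perceptron_output(and_weights, and_bias, [0, 1]) == 0
assert perceptron_output(and_weights, and_bias, [0, 0]) == 0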
def neuron_output(weights: Vector, inputs: Vector) -> float:
    """Weights include a bias term, inputs include a 1."""
    return sigmoid(dot(weights, inputs))
def sum_of_squares(v: Vector) -> float:
    """Computes the sum of squared elements in v"""
    return dot(v, v)
Example #12
def project(v, w):
    """return the projection of v onto the direction w"""
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
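A small sketch of project in action; scalar_multiply and dot are reimplemented here, and w is taken to be a unit vector, since projection_length is simply dot(v, w).

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def scalar_multiply(c, v):
    return [c * v_i for v_i in v]

v = [3.0, 4.0]
w = [1.0, 0.0]                  # unit vector along the x-axis
print(project(v, w))            # [3.0, 0.0]: the component of v along w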
Example #13
def directional_variance_i(x_i, w):
    """the variance of the row x_i in the direction determined by w"""
    return dot(x_i, direction(w)) ** 2
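Both directional_variance_i and the directional_variance_gradient_i shown earlier rely on a direction helper that rescales w to unit length. A hedged sketch of that assumed helper, summing the per-row pieces over a tiny made-up dataset:

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def magnitude(v):
    return math.sqrt(dot(v, v))

def direction(w):
    """Assumed helper: rescale w to length 1."""
    mag = magnitude(w)
    return [w_i / mag for w_i in w]

data = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]   # toy rows
w = [1.0, 1.0]                                # any nonzero candidate direction

total_variance = sum(directional_variance_i(x_i, w) for x_i in data)
total_gradient = [sum(parts) for parts in
                  zip(*(directional_variance_gradient_i(x_i, w) for x_i in data))]
print(total_variance, total_gradient)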
Example #14
def matrix_product_entry(A, B, i, j):
    return dot(get_row(A, i), get_column(B, j))
def matrix_times_vector(m: Matrix, v: Vector) -> Vector:
    nr, nc = shape(m)
    n = len(v)
    assert nc == n, "must have (# of columns in m) == (# of elements in v)"
    
    return [dot(row, v) for row in m] # output has length nr
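A tiny worked example; shape and dot are sketched below (the Matrix and Vector aliases above are just type hints), so the call runs on its own.

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def shape(m):
    """Assumed helper: (# of rows, # of columns)."""
    return len(m), len(m[0])

m = [[1, 2, 3],
     [4, 5, 6]]                        # 2 x 3 matrix
v = [1, 0, -1]

print(matrix_times_vector(m, v))       # [-2, -2]: one dot product per row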
user_vectors = {user_id: random_tensor(EMBEDDING_DIM)
                for user_id in user_ids}
movie_vectors = {movie_id: random_tensor(EMBEDDING_DIM)
                 for movie_id in movie_ids}

from typing import List
import tqdm
from vector_operations import dot

def loop(dataset: List[Rating],
         learning_rate: float = None) -> None:
    with tqdm.tqdm(dataset) as t:
        loss = 0.0
        for i, rating in enumerate(t):
            movie_vector = movie_vectors[rating.movie_id]
            user_vector = user_vectors[rating.user_id]
            predicted = dot(user_vector, movie_vector)
            error = predicted - rating.rating
            loss += error ** 2

            if learning_rate is not None:
                #     predicted = m_0 * u_0 + ... + m_k * u_k
                # so each u_j enters the output with coefficient m_j
                # and each m_j enters the output with coefficient u_j
                user_gradient = [error * m_j for m_j in movie_vector]
                movie_gradient = [error * u_j for u_j in user_vector]

                # Take gradient steps
                for j in range(EMBEDDING_DIM):
                    user_vector[j] -= learning_rate * user_gradient[j]
                    movie_vector[j] -= learning_rate * movie_gradient[j]

            t.set_description(f"avg loss: {loss / (i + 1)}")
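A hedged sketch (not from the original source) of how loop might be driven: Rating here is a stand-in NamedTuple with the fields loop reads, the ids and ratings are made up and would have to appear in the user_ids/movie_ids used above, and in a real script Rating would need to be defined before loop.

from typing import NamedTuple

class Rating(NamedTuple):
    user_id: str
    movie_id: str
    rating: float

# Hypothetical training data
train = [Rating("u1", "m1", 5.0),
         Rating("u1", "m2", 1.0),
         Rating("u2", "m1", 4.0)]

learning_rate = 0.05
for epoch in range(20):
    learning_rate *= 0.9                      # shrink the step size each epoch
    loop(train, learning_rate=learning_rate)

loop(train)                                   # learning_rate=None: report loss only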
    
Example #17
def transform_vector(v, components):
    return [dot(v, w) for w in components]
def ridge_penalty(beta: Vector,
                  alpha: float) -> float:
    return alpha * dot(beta[1:], beta[1:])
def predict(x: Vector, beta: Vector) -> float:
    """Assumes that the first element of x is 1"""
    return dot(x, beta)
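ridge_penalty is meant to be added to the ordinary squared-error loss; a sketch of that combination under that reading, with a tiny worked number (dot reimplemented so it runs standalone):

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def squared_error_ridge(x, y, beta, alpha):
    """Squared error plus a penalty on every coefficient except the intercept."""
    return (predict(x, beta) - y) ** 2 + ridge_penalty(beta, alpha)

beta = [1.0, 2.0, 3.0]          # intercept, then two coefficients
x = [1, 4, 5]                   # leading 1 pairs with the intercept
print(squared_error_ridge(x, 22, beta, alpha=0.5))   # (24-22)**2 + 0.5*13 = 10.5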
def cosine_similarity(v1: Vector, v2: Vector) -> float:
    return dot(v1, v2) / math.sqrt(dot(v1, v1) * dot(v2, v2))
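A few quick checks (dot reimplemented so the lines run standalone):

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

print(cosine_similarity([1.0, 1.0], [2.0, 2.0]))    # 1.0: same direction
print(cosine_similarity([1.0, 0.0], [0.0, 1.0]))    # 0.0: orthogonal
print(cosine_similarity([1.0, 1.0], [-1.0, -1.0]))  # -1.0: opposite directions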
Example #21
def ridge_penalty(beta, alpha):
    return alpha * dot(beta[1:], beta[1:])