    def forward(self, input: Tensor) -> Tensor:
        self.input = input  # Save both input and previous
        self.prev_hidden = self.hidden  # hidden state to use in backprop

        a = [
            dot(self.w[h], input) + dot(self.u[h], self.hidden) + self.b[h]
            for h in range(self.hidden_dim)
        ]

        self.hidden = tensor_apply(tanh, a)  # Apply tanh activation
        return self.hidden  # and return the result
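
# The forward pass above belongs to a SimpleRnn-style recurrent layer that keeps its
# hidden state between calls. Below is a minimal, self-contained sketch (hypothetical
# names and uniform random initialization, not necessarily the original class) showing
# how such a layer can be constructed and driven one time step at a time.
import math
import random
from typing import Callable, List

Tensor = List[float]

def dot(v: Tensor, w: Tensor) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def tensor_apply(f: Callable[[float], float], t: Tensor) -> Tensor:
    return [f(x) for x in t]          # 1-dimensional tensors are enough here

class SimpleRnn:
    """Recurrent layer: hidden_t = tanh(W @ input_t + U @ hidden_{t-1} + b)."""
    def __init__(self, input_dim: int, hidden_dim: int) -> None:
        self.input_dim, self.hidden_dim = input_dim, hidden_dim
        self.w = [[random.uniform(-1, 1) for _ in range(input_dim)]
                  for _ in range(hidden_dim)]
        self.u = [[random.uniform(-1, 1) for _ in range(hidden_dim)]
                  for _ in range(hidden_dim)]
        self.b = [0.0] * hidden_dim
        self.reset_hidden_state()

    def reset_hidden_state(self) -> None:
        self.hidden = [0.0] * self.hidden_dim

    def forward(self, input: Tensor) -> Tensor:
        # Same recurrence as the forward pass above (backprop bookkeeping omitted).
        a = [dot(self.w[h], input) + dot(self.u[h], self.hidden) + self.b[h]
             for h in range(self.hidden_dim)]
        self.hidden = tensor_apply(math.tanh, a)
        return self.hidden

# The hidden state carries information across time steps within a sequence.
rnn = SimpleRnn(input_dim=3, hidden_dim=4)
rnn.reset_hidden_state()                      # start of a new sequence
for one_hot in ([1., 0., 0.], [0., 1., 0.], [0., 0., 1.]):
    hidden = rnn.forward(one_hot)             # len(hidden) == 4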
def sqerror_gradients(network: List[List[Vector]], input_vector: Vector,
                      target_vector: Vector) -> List[List[Vector]]:
    """Given a neural network, an input vector and a target vector,
    makes a prediction and computes the gradient of squared error loss
    with respect to the neuron weights."""

    # forward pass
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # gradients with respect to output neuron pre-activation outputs
    output_deltas = [
        output * (1 - output) * (output - target)
        for output, target in zip(outputs, target_vector)
    ]

    # gradients with respect to output neuron weights
    output_grads = [[
        output_deltas[i] * hidden_output
        for hidden_output in hidden_outputs + [1]
    ] for i, output_neuron in enumerate(network[-1])]

    # gradients with respect to hidden neuron pre-activation outputs
    hidden_deltas = [
        hidden_output * (1 - hidden_output) *
        dot(output_deltas, [n[i] for n in network[-1]])
        for i, hidden_output in enumerate(hidden_outputs)
    ]

    # gradients with respect to hidden neuron weights
    hidden_grads = [[hidden_deltas[i] * input for input in input_vector + [1]]
                    for i, hidden_neuron in enumerate(network[0])]

    return [hidden_grads, output_grads]
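
# sqerror_gradients pairs naturally with a feed-forward prediction function and plain
# gradient descent. The helpers it relies on (dot, sigmoid, neuron_output, feed_forward,
# gradient_step) are not shown above, so they are (re)defined here; the XOR data and the
# hyperparameters are illustrative choices for a sketch, not prescriptive values.
import math
import random
from typing import List

Vector = List[float]

def dot(v: Vector, w: Vector) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sigmoid(t: float) -> float:
    return 1 / (1 + math.exp(-t))

def neuron_output(weights: Vector, inputs: Vector) -> float:
    return sigmoid(dot(weights, inputs))

def feed_forward(network: List[List[Vector]],
                 input_vector: Vector) -> List[Vector]:
    """Feed the input through the network and return every layer's outputs."""
    outputs: List[Vector] = []
    for layer in network:
        input_with_bias = input_vector + [1]               # constant bias input
        output = [neuron_output(neuron, input_with_bias)
                  for neuron in layer]
        outputs.append(output)
        input_vector = output                              # feed into the next layer
    return outputs

def gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:
    return [v_i + step_size * g_i for v_i, g_i in zip(v, gradient)]

# Train a 2-2-1 network on XOR.
random.seed(0)
xs = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
ys = [[0.], [1.], [1.], [0.]]

network = [  # each neuron has one weight per input plus a bias weight
    [[random.random() for _ in range(2 + 1)] for _ in range(2)],   # hidden layer
    [[random.random() for _ in range(2 + 1)]]                      # output layer
]

learning_rate = 1.0
for _ in range(20000):
    for x, y in zip(xs, ys):
        gradients = sqerror_gradients(network, x, y)
        # step "downhill" for every neuron in every layer
        network = [[gradient_step(neuron, grad, -learning_rate)
                    for neuron, grad in zip(layer, layer_grad)]
                   for layer, layer_grad in zip(network, gradients)]

assert feed_forward(network, [1., 0.])[-1][0] > 0.9   # XOR(1, 0) should be near 1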
Example #3
    def forward(self, input: Tensor) -> Tensor:
        # Save the input to use in the backward pass.
        self.input = input

        # Return the vector of neuron outputs.
        return [
            dot(input, self.w[o]) + self.b[o] for o in range(self.output_dim)
        ]
def neuron_output(weights: Vector, inputs: Vector) -> float:
    """The output of a sigmoid neuron: sigmoid of the weighted sum of its inputs."""
    return sigmoid(dot(weights, inputs))
def perceptron_output(weights: Vector, bias: float, x: Vector) -> float:
    # Returns 1 if the perceptron 'fires', 0 if not
    return step_function(dot(weights, x) + bias)
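
# As a concrete check of perceptron_output: with weights [2, 2] and bias -3 the
# perceptron computes a logical AND of two 0/1 inputs. step_function and dot are
# assumed by the code above and defined here so the snippet runs on its own.
from typing import List

Vector = List[float]

def step_function(x: float) -> float:
    return 1.0 if x >= 0 else 0.0

def dot(v: Vector, w: Vector) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

and_weights = [2., 2.]
and_bias = -3.

assert perceptron_output(and_weights, and_bias, [1, 1]) == 1   # fires only when
assert perceptron_output(and_weights, and_bias, [0, 1]) == 0   # both inputs are 1
assert perceptron_output(and_weights, and_bias, [1, 0]) == 0
assert perceptron_output(and_weights, and_bias, [0, 0]) == 0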
Example #6
def transform_vector(v: Vector, components: List[Vector]) -> Vector:
    """The coordinates of v with respect to the given (principal) components."""
    return [dot(v, w) for w in components]
Example #7
def project(v: Vector, w: Vector) -> Vector:
    """Return the projection of v onto the direction w"""
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
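
# project assumes w is a unit vector (a "direction"), so dot(v, w) is the signed length
# of v along w. A quick numeric check, with dot and scalar_multiply defined here since
# they are not shown above:
from typing import List

Vector = List[float]

def dot(v: Vector, w: Vector) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def scalar_multiply(c: float, v: Vector) -> Vector:
    return [c * v_i for v_i in v]

# The component of [3, 4] along the x-axis direction [1, 0] is [3, 0].
assert project([3., 4.], [1., 0.]) == [3., 0.]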
Example #8
def directional_variance_gradient(data: List[Vector], w: Vector) -> Vector:
    """The gradient of directional variance w.r.t w"""
    w_dir = direction(w)
    return [sum(2 * dot(v, w_dir) * v[i] for v in data) for i in range(len(w))]
Example #9
def directional_variance(data: List[Vector], w: Vector) -> float:
    """Returns the variance of the data in the direction of w"""
    w_dir = direction(w)
    return sum(dot(v, w_dir) ** 2 for v in data)
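
# directional_variance and its gradient are typically combined into a gradient-ascent
# search for the first principal component. A self-contained sketch: direction,
# magnitude, and gradient_step are assumed by the functions above and (re)defined here,
# and 100 steps of size 0.1 are illustrative defaults rather than tuned values.
import math
from typing import List

Vector = List[float]

def dot(v: Vector, w: Vector) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def magnitude(v: Vector) -> float:
    return math.sqrt(dot(v, v))

def direction(w: Vector) -> Vector:
    mag = magnitude(w)
    return [w_i / mag for w_i in w]

def gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:
    return [v_i + step_size * g_i for v_i, g_i in zip(v, gradient)]

def first_principal_component(data: List[Vector],
                              n: int = 100,
                              step_size: float = 0.1) -> Vector:
    guess = [1.0 for _ in data[0]]
    for _ in range(n):
        # step "uphill" in the direction that increases the directional variance
        gradient = directional_variance_gradient(data, guess)
        guess = gradient_step(guess, gradient, step_size)
    return direction(guess)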
Example #10
def ridge_penalty(beta: Vector, alpha: float) -> float:
    # alpha times the sum of squared coefficients; beta[0] is the bias term and is not penalized
    return alpha * dot(beta[1:], beta[1:])
Example #11
def predict(x: Vector, beta: Vector) -> float:
    """Assumes that the first element of x is 1 (the bias term)"""
    return dot(x, beta)
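
# predict and ridge_penalty combine into a ridge-regularized squared-error loss.
# A small sketch: error and squared_error_ridge are illustrative names, and dot is
# defined here since it is not shown above.
from typing import List

Vector = List[float]

def dot(v: Vector, w: Vector) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def error(x: Vector, y: float, beta: Vector) -> float:
    return predict(x, beta) - y

def squared_error_ridge(x: Vector, y: float,
                        beta: Vector, alpha: float) -> float:
    """Squared prediction error plus the ridge penalty on beta."""
    return error(x, y, beta) ** 2 + ridge_penalty(beta, alpha)

# With the leading 1 as the bias input: 0.5 * 1 + 1.0 * 2 - 1.0 * 3 == -0.5
assert predict([1., 2., 3.], [0.5, 1.0, -1.0]) == -0.5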
def _negative_log_partial_j(x: Vector, y: float, beta: Vector, j: int) -> float:
    """
    The j-th partial derivative of the negative log likelihood
    for a single data point (x, y)
    """
    return -(y - logistic(dot(x, beta))) * x[j]
def _negative_log_likelihood(x: Vector, y: float, beta: Vector) -> float:
    """The negative log likelihood of one data point (x, y), given beta."""
    if y == 1:
        return -math.log(logistic(dot(x, beta)))
    else:
        return -math.log(1 - logistic(dot(x, beta)))
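
# The per-point functions above are usually summed over the whole dataset: the total
# negative log likelihood is what gradient descent minimizes, and its gradient stacks
# the per-coefficient partials. A sketch, assuming the two functions above are in
# scope; logistic, dot, and vector_sum are defined here for completeness.
import math
from typing import List

Vector = List[float]

def dot(v: Vector, w: Vector) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def logistic(x: float) -> float:
    return 1.0 / (1 + math.exp(-x))

def vector_sum(vectors: List[Vector]) -> Vector:
    return [sum(v[i] for v in vectors) for i in range(len(vectors[0]))]

def negative_log_likelihood(xs: List[Vector], ys: List[float],
                            beta: Vector) -> float:
    return sum(_negative_log_likelihood(x, y, beta)
               for x, y in zip(xs, ys))

def negative_log_gradient(xs: List[Vector], ys: List[float],
                          beta: Vector) -> Vector:
    # one partial derivative per coefficient, summed over all data points
    return vector_sum([[_negative_log_partial_j(x, y, beta, j)
                        for j in range(len(beta))]
                       for x, y in zip(xs, ys)])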
# The model was fit on rescaled features, so convert beta back to the original units:
# beta[0] + beta[1] * (x[1] - means[1]) / stdevs[1] + beta[2] * (x[2] - means[2]) / stdevs[2],
# regrouped as a new intercept plus beta[j] / stdevs[j] times the raw x[j].
means, stdevs = scale(xs)
beta_unscaled = [(beta[0]
                  - beta[1] * means[1] / stdevs[1]
                  - beta[2] * means[2] / stdevs[2]),
                 beta[1] / stdevs[1],
                 beta[2] / stdevs[2]]
beta_unscaled
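
# A quick sanity check of the unscaling algebra with made-up numbers (suffixed names so
# nothing above is overwritten): the linear score must be identical whether we combine
# the scaled inputs with beta or the raw inputs with the unscaled coefficients.
beta_chk = [1.0, 2.0, -3.0]
means_chk = [1.0, 10.0, 100.0]
stdevs_chk = [1.0, 5.0, 20.0]       # entry 0 is for the constant-1 column and is unused

x_chk = [1.0, 12.0, 90.0]           # a raw (unscaled) data point
x_scaled_chk = [1.0] + [(x_chk[j] - means_chk[j]) / stdevs_chk[j] for j in (1, 2)]

beta_unscaled_chk = [(beta_chk[0]
                      - beta_chk[1] * means_chk[1] / stdevs_chk[1]
                      - beta_chk[2] * means_chk[2] / stdevs_chk[2]),
                     beta_chk[1] / stdevs_chk[1],
                     beta_chk[2] / stdevs_chk[2]]

scaled_score = sum(b * xi for b, xi in zip(beta_chk, x_scaled_chk))
raw_score = sum(b * xi for b, xi in zip(beta_unscaled_chk, x_chk))
assert abs(scaled_score - raw_score) < 1e-9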

#####################################################################################################
# Test the model's performance on the testing data
####################################################################################################

true_positives = false_positives = true_negatives = false_negatives = 0

for x_i, y_i in zip(x_test, y_test):
    prediction = logistic(dot(beta, x_i))

    if y_i == 1 and prediction >= 0.5:   # TP: actually 1, predicted 1
        true_positives += 1
    elif y_i == 1:                       # FN: actually 1, predicted 0
        false_negatives += 1
    elif prediction >= 0.5:              # FP: actually 0, predicted 1
        false_positives += 1
    else:                                # TN: actually 0, predicted 0
        true_negatives += 1

precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)

precision, recall
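
# Precision and recall trade off against each other; if a single summary number is
# wanted, one common choice is their harmonic mean, the F1 score, computed from the
# values above:
f1_score = 2 * precision * recall / (precision + recall)
f1_score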
def cosine_similarity(v1: Vector, v2: Vector) -> float:
    """The cosine of the angle between v1 and v2: dot(v1, v2) / (|v1| * |v2|)"""
    return dot(v1, v2) / math.sqrt(dot(v1, v1) * dot(v2, v2))
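
# A few quick checks (dot is defined here since it is not shown above): orthogonal
# vectors score 0, parallel vectors score 1, and opposite vectors score -1.
import math
from typing import List

Vector = List[float]

def dot(v: Vector, w: Vector) -> float:
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

assert cosine_similarity([1., 0.], [0., 1.]) == 0.0
assert abs(cosine_similarity([1., 1.], [2., 2.]) - 1.0) < 1e-9
assert abs(cosine_similarity([1., 2.], [-1., -2.]) + 1.0) < 1e-9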