def forward(self, inp: Tensor) -> Tensor:
    # save both the input and the previous hidden state to use in backprop
    self.input = inp
    self.prev_hidden = self.hidden

    a = [(dot(self.w[h], inp) +           # weights @ input
          dot(self.u[h], self.hidden) +   # weights @ hidden
          self.b[h])                      # bias
         for h in range(self.hidden_dim)]

    self.hidden = tensor_apply(tanh, a)   # apply tanh activation
    return self.hidden                    # and return the result
def loop(dataset: List[Rating],
         movie_vectors,
         user_vectors,
         learning_rate: float = None) -> None:
    with tqdm.tqdm(dataset) as t:
        loss = 0.0
        for i, rating in enumerate(t):
            movie_vector = movie_vectors[rating.movie_id]
            user_vector = user_vectors[rating.user_id]
            predicted = dot(user_vector, movie_vector)
            error = predicted - rating.rating
            loss += error ** 2

            if learning_rate is not None:
                # predicted = m_0 * u_0 + ... + m_k * u_k, so each partial
                # derivative is the error times the other vector's entry
                user_gradient = [error * m_j for m_j in movie_vector]
                movie_gradient = [error * u_j for u_j in user_vector]

                # take gradient steps, updating the vectors in place
                for j in range(EMBEDDING_DIM):
                    user_vector[j] -= learning_rate * user_gradient[j]
                    movie_vector[j] -= learning_rate * movie_gradient[j]

            t.set_description(f"avg loss: {loss / (i + 1)}")
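# A hedged usage sketch: the `train`/`test` splits of the ratings and the
# epoch count and decay schedule here are assumptions for illustration.
# Passing no learning rate makes loop() evaluation-only (it never steps).
learning_rate = 0.05
for epoch in range(20):
    learning_rate *= 0.9                                      # decay the step size
    loop(train, movie_vectors, user_vectors, learning_rate)   # train
loop(test, movie_vectors, user_vectors)                       # evaluate at the end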
def sqerror_gradients(network: List[List[Vector]],
                      input_vector: Vector,
                      target_vector: Vector) -> List[List[Vector]]:
    """
    Given a network, an input vector, and a target vector, make a
    prediction and return the gradient of the squared error loss
    with respect to the neuron weights.
    """
    # forward pass
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # gradients with respect to output neuron pre-activation outputs
    output_deltas = [output * (1 - output) * (output - target)
                     for output, target in zip(outputs, target_vector)]

    # gradients with respect to output neuron weights
    output_grads = [[output_deltas[i] * hidden_output
                     for hidden_output in hidden_outputs + [1]]
                    for i, output_neuron in enumerate(network[-1])]

    # gradients with respect to hidden neuron pre-activation outputs
    hidden_deltas = [hidden_output * (1 - hidden_output) *
                         dot(output_deltas, [n[i] for n in network[-1]])
                     for i, hidden_output in enumerate(hidden_outputs)]

    # gradients with respect to hidden neuron weights
    hidden_grads = [[hidden_deltas[i] * input for input in input_vector + [1]]
                    for i, hidden_neuron in enumerate(network[0])]

    return [hidden_grads, output_grads]
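# A hedged usage sketch: one epoch of stochastic gradient descent on XOR.
# `network` (a 2-2-1 network of weight vectors) and `gradient_step` (move a
# vector by -learning_rate along a gradient) are assumed from context.
xs = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
ys = [[0.], [1.], [1.], [0.]]

learning_rate = 1.0
for x, y in zip(xs, ys):
    gradients = sqerror_gradients(network, x, y)
    # take a gradient step for every neuron in every layer
    network = [[gradient_step(neuron, grad, -learning_rate)
                for neuron, grad in zip(layer, layer_grad)]
               for layer, layer_grad in zip(network, gradients)]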
def neuron_output(weights: Vector, inputs: Vector) -> float:
    # weights includes the bias term, inputs includes a 1
    return sigmoid(dot(weights, inputs))
def perceptron_output(weights: Vector, bias: float, x: Vector) -> float:
    """Returns 1 if the perceptron 'fires', 0 if not."""
    calculation = dot(weights, x) + bias
    return step_function(calculation)
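# Sanity checks, assuming step_function(x) returns 1.0 for x >= 0 and 0.0
# otherwise: weights [2., 2.] with bias -3. implements an AND gate.
and_weights = [2., 2.]
and_bias = -3.

assert perceptron_output(and_weights, and_bias, [1, 1]) == 1
assert perceptron_output(and_weights, and_bias, [0, 1]) == 0
assert perceptron_output(and_weights, and_bias, [1, 0]) == 0
assert perceptron_output(and_weights, and_bias, [0, 0]) == 0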
def project(v: Vector, w: Vector) -> Vector:
    """return the projection of v onto the direction w (assumed unit length)"""
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
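# Quick check: projecting [3, 4] onto the unit vector [1, 0] keeps only
# the first coordinate.
assert project([3, 4], [1, 0]) == [3, 0]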
def directional_variance_gradient(data: List[Vector], w: Vector) -> Vector:
    """The gradient of directional variance with respect to w"""
    w_dir = direction(w)
    return [sum(2 * dot(v, w_dir) * v[i] for v in data)
            for i in range(len(w))]
def directional_variance(data: List[Vector], w: Vector) -> float:
    """
    Returns the variance of the (mean-zero) data in the direction of w.
    There's no division by len(data), since that doesn't change which
    direction maximizes the result.
    """
    w_dir = direction(w)
    return sum(dot(v, w_dir) ** 2 for v in data)
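# Worked example (assuming the usual direction() helper, which just rescales
# w to unit length): direction([2, 0]) is [1.0, 0.0], so this sums the
# squared first coordinates: 1 + 4 + 1 = 6.
assert directional_variance([[1, 0], [2, 0], [-1, 0]], [2, 0]) == 6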
def transform_vector(v: Vector, components: List[Vector]) -> Vector:
    return [dot(v, w) for w in components]
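# Illustration: with these two (pretend) principal components, a
# 3-dimensional point is reduced to its coordinate along each component.
assert transform_vector([3, 4, 5], [[1, 0, 0], [0, 1, 0]]) == [3, 4]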
def _negative_log_partial_j(x: Vector, y: float, beta: Vector, j: int) -> float:
    """The j-th partial derivative of the negative log likelihood
    for one data point."""
    return -(y - logistic(dot(x, beta))) * x[j]
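# A natural companion (the name _negative_log_gradient is an assumption):
# the full gradient for one data point is just one partial per coefficient.
def _negative_log_gradient(x: Vector, y: float, beta: Vector) -> Vector:
    return [_negative_log_partial_j(x, y, beta, j)
            for j in range(len(beta))]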
def matrix_times_vector(m: Matrix, v: Vector) -> Vector:
    nr, nc = shape(m)
    n = len(v)
    assert nc == n, "matrix columns must match vector length"
    return [dot(row, v) for row in m]
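# Sanity check: a 2x3 matrix times a length-3 vector of ones just sums
# each row.
assert matrix_times_vector([[1, 2, 3],
                            [4, 5, 6]], [1, 1, 1]) == [6, 15]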
def covariance(xs: List[float], ys: List[float]) -> float:
    assert len(xs) == len(ys), "xs and ys must have the same number of elements"
    return dot(de_mean(xs), de_mean(ys)) / (len(xs) - 1)
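# Worked example: de_mean([1, 2, 3]) is [-1, 0, 1], and so is the de-meaned
# [4, 5, 6], so the covariance is dot([-1, 0, 1], [-1, 0, 1]) / 2 = 1.0.
assert covariance([1, 2, 3], [4, 5, 6]) == 1.0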
def ridge_penalty(beta: Vector, alpha: float) -> float:
    # alpha times the sum of squared coefficients, skipping beta[0]
    # (we don't penalize the constant term)
    return alpha * dot(beta[1:], beta[1:])
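# A hedged companion sketch: the loss ridge regression actually minimizes is
# the squared error plus the penalty. The `error` helper (predicted minus
# actual for one data point) and its argument order are assumed from context.
def squared_error_ridge(x: Vector, y: float, beta: Vector, alpha: float) -> float:
    """estimate error plus ridge penalty on beta"""
    return error(x, y, beta) ** 2 + ridge_penalty(beta, alpha)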
def predict(x: Vector, beta: Vector) -> float:
    """assumes that the first element of x is 1"""
    return dot(x, beta)
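# Example: with x = [1, x_1] and beta = [intercept, slope], predict is just
# a line: 10 + 2 * 3 = 16.
assert predict([1, 3], [10, 2]) == 16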
def cosine_similarity(v1: Vector, v2: Vector) -> float:
    return dot(v1, v2) / math.sqrt(dot(v1, v1) * dot(v2, v2))
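# Sanity checks: parallel vectors have similarity 1, orthogonal vectors 0
# (both values are exact here, so the float comparison is safe).
assert cosine_similarity([1., 1.], [2., 2.]) == 1.0
assert cosine_similarity([1., 0.], [0., 1.]) == 0.0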
def test_dot():
    assert under_test.dot([1, 2, 3], [4, 5, 6]) == 32
def forward(self, inp: Tensor) -> Tensor:
    # save the input to use in the backward pass
    self.inp = inp

    # return one value per output neuron: its weights dotted with the
    # input, plus its bias
    return [dot(inp, self.w[o]) + self.b[o]
            for o in range(self.output_dim)]
def _negative_log_likelihood(x: Vector, y: float, beta: Vector) -> float:
    """The negative log likelihood for one data point"""
    if y == 1:
        return -math.log(logistic(dot(x, beta)))
    else:
        return -math.log(1 - logistic(dot(x, beta)))
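# A hedged companion (the name negative_log_likelihood is an assumption):
# if the data points are independent, the total negative log likelihood is
# just the sum of the per-point values above.
def negative_log_likelihood(xs: List[Vector], ys: List[float], beta: Vector) -> float:
    return sum(_negative_log_likelihood(x, y, beta)
               for x, y in zip(xs, ys))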