Example #1
File: nlp.py Project: dbradf/dsfs
    def forward(self, inp: Tensor) -> Tensor:
        # Save the input and the previous hidden state for the backward pass.
        self.input = inp
        self.prev_hidden = self.hidden

        # a[h] = w[h] . input + u[h] . hidden + b[h], one per hidden unit.
        a = [(dot(self.w[h], inp) + dot(self.u[h], self.hidden) + self.b[h])
             for h in range(self.hidden_dim)]

        self.hidden = tensor_apply(tanh, a)
        return self.hidden
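
A minimal driving sketch (hedged: reset_hidden_state and the one-hot inputs are assumptions, based on how dsfs uses its SimpleRnn):

rnn.reset_hidden_state()          # zero the hidden state between sequences
for x_t in one_hot_inputs:        # one input vector per time step
    hidden = rnn.forward(x_t)     # h_t = tanh(W x_t + U h_{t-1} + b)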
Example #2
def loop(dataset: List[Rating],
         movie_vectors: Dict[str, List[float]],
         user_vectors: Dict[str, List[float]],
         learning_rate: Optional[float] = None) -> None:
    with tqdm.tqdm(dataset) as t:
        loss = 0.0
        for i, rating in enumerate(t):
            movie_vector = movie_vectors[rating.movie_id]
            user_vector = user_vectors[rating.user_id]
            predicted = dot(user_vector, movie_vector)
            error = predicted - rating.rating
            loss += error**2

            if learning_rate is not None:
                # predicted = sum_j u_j * m_j, so each u_j enters the output
                # with coefficient m_j (and each m_j with coefficient u_j)
                user_gradient = [error * m_j for m_j in movie_vector]
                movie_gradient = [error * u_j for u_j in user_vector]

                for j in range(EMBEDDING_DIM):
                    user_vector[j] -= learning_rate * user_gradient[j]
                    movie_vector[j] -= learning_rate * movie_gradient[j]
            t.set_description(f"avg loss: {loss / (i + 1)}")
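
A sketch of the usual calling pattern (hedged: the epoch count, decay schedule, and the train/test names are illustrative):

learning_rate = 0.05
for epoch in range(20):
    learning_rate *= 0.9          # decay the step size each epoch
    loop(train, movie_vectors, user_vectors, learning_rate=learning_rate)
loop(test, movie_vectors, user_vectors)   # learning_rate=None: just measure loss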
Example #3
def sqerror_gradients(
    network: List[List[Vector]], input_vector: Vector, target_vector: Vector
) -> List[List[Vector]]:
    # forward pass
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # gradients with respect to output neuron pre-activation outputs
    output_deltas = [
        output * (1 - output) * (output - target)
        for output, target in zip(outputs, target_vector)
    ]

    # gradients with respect to output neuron weights
    output_grads = [
        [output_deltas[i] * hidden_output for hidden_output in hidden_outputs + [1]]
        for i, output_neuron in enumerate(network[-1])
    ]

    # gradients with respect to hidden neuron pre-activation outputs
    hidden_deltas = [
        hidden_output * (1 - hidden_output) * dot(output_deltas, [n[i] for n in network[-1]])
        for i, hidden_output in enumerate(hidden_outputs)
    ]

    # gradients with respect to hidden neuron weights
    hidden_grads = [
        [hidden_deltas[i] * input for input in input_vector + [1]]
        for i, hidden_neuron in enumerate(network[0])
    ]

    return [hidden_grads, output_grads]
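
A minimal training-step sketch, assuming dsfs's gradient_step(v, gradient, step_size) helper (a negative step_size descends the gradient):

for x, y in zip(xs, ys):          # xs, ys: training inputs/targets (assumed)
    gradients = sqerror_gradients(network, x, y)
    network = [[gradient_step(neuron, grad, -learning_rate)
                for neuron, grad in zip(layer, layer_grads)]
               for layer, layer_grads in zip(network, gradients)]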
Example #4
def neuron_output(weights: Vector, inputs: Vector) -> float:
    return sigmoid(dot(weights, inputs))
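
neuron_output assumes dsfs's sigmoid, the standard logistic function:

import math

def sigmoid(t: float) -> float:
    return 1 / (1 + math.exp(-t))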
Example #5
def perceptron_output(weights: Vector, bias: float, x: Vector) -> float:
    calculation = dot(weights, x) + bias
    return step_function(calculation)
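
With suitable parameters this step-function perceptron computes simple logic gates; for example, weights [2., 2.] and bias -3. give an AND gate:

and_weights = [2., 2.]
and_bias = -3.
assert perceptron_output(and_weights, and_bias, [1, 1]) == 1   # 2 + 2 - 3 >= 0
assert perceptron_output(and_weights, and_bias, [0, 1]) == 0   # 2 - 3 < 0
assert perceptron_output(and_weights, and_bias, [1, 0]) == 0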
Example #6
def project(v: Vector, w: Vector) -> Vector:
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
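
Note that project treats w as a unit-length direction; otherwise dot(v, w) is not the projection length. A quick check:

assert project([3, 4], [1, 0]) == [3, 0]   # only the x-component survives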
Example #7
def directional_variance_gradient(data: List[Vector], w: Vector) -> Vector:
    w_dir = direction(w)
    return [sum(2 * dot(v, w_dir) * v[i] for v in data) for i in range(len(w))]
Example #8
def directional_variance(data: List[Vector], w: Vector) -> float:
    w_dir = direction(w)
    return sum(dot(v, w_dir)**2 for v in data)
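
Examples #7 and #8 are the pieces dsfs combines to find a first principal component by gradient ascent; a condensed sketch, assuming the book's gradient_step helper:

def first_principal_component(data: List[Vector],
                              n: int = 100,
                              step_size: float = 0.1) -> Vector:
    guess = [1.0 for _ in data[0]]        # arbitrary starting direction
    for _ in range(n):
        gradient = directional_variance_gradient(data, guess)
        guess = gradient_step(guess, gradient, step_size)   # ascend, not descend
    return direction(guess)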
Example #9
def transform_vector(v: Vector, components: List[Vector]) -> Vector:
    return [dot(v, w) for w in components]
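
Usage sketch (hedged: the components list is assumed to come from a PCA step such as the one sketched under example #8):

transformed = [transform_vector(v, components) for v in data]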
Example #10
def _negative_log_partial_j(x: Vector, y: float, beta: Vector, j: int) -> float:
    return -(y - logistic(dot(x, beta))) * x[j]
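
The full per-example gradient just collects these partial derivatives over all coordinates; in dsfs that is:

def _negative_log_gradient(x: Vector, y: float, beta: Vector) -> Vector:
    return [_negative_log_partial_j(x, y, beta, j)
            for j in range(len(beta))]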
Example #11
def matrix_times_vector(m: Matrix, v: Vector) -> Vector:
    nr, nc = shape(m)
    n = len(v)
    assert nc == n, "matrix columns must match vector length"

    return [dot(row, v) for row in m]
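
A quick check:

assert matrix_times_vector([[1, 2], [3, 4]], [5, 6]) == [17, 39]   # [1*5+2*6, 3*5+4*6]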
Example #12
File: stats.py Project: dbradf/dsfs
def covariance(xs: List[float], ys: List[float]) -> float:
    assert len(xs) == len(ys), "xs and ys must have the same number of elements"

    return dot(de_mean(xs), de_mean(ys)) / (len(xs) - 1)
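
For example:

# Both means are subtracted, leaving [-1, 0, 1] twice; dot = 2, n - 1 = 2.
assert covariance([1, 2, 3], [4, 5, 6]) == 1.0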
Example #13
def ridge_penalty(beta: Vector, alpha: float) -> float:
    return alpha * dot(beta[1:], beta[1:])
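
Note that beta[1:] deliberately skips beta[0], so the intercept is never penalized:

assert ridge_penalty([10.0, 3.0, 4.0], alpha=0.5) == 0.5 * (9.0 + 16.0)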
Example #14
def predict(x: Vector, beta: Vector) -> float:
    return dot(x, beta)
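
By dsfs convention the first element of each input is 1, making beta[0] the intercept:

assert predict([1, 2], [3.0, 4.0]) == 3.0 + 4.0 * 2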
Example #15
File: nlp.py Project: dbradf/dsfs
def cosine_similarity(v1: Vector, v2: Vector) -> float:
    return dot(v1, v2) / math.sqrt(dot(v1, v1) * dot(v2, v2))
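
Quick checks:

assert cosine_similarity([1., 1.], [2., 2.]) == 1.0   # same direction
assert cosine_similarity([1., 0.], [0., 1.]) == 0.0   # orthogonal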
Example #16
def test_dot():
    assert under_test.dot([1, 2, 3], [4, 5, 6]) == 32
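
For reference, the function under test is dsfs's dot product, defined in the book's linear_algebra module as:

def dot(v: Vector, w: Vector) -> float:
    assert len(v) == len(w), "vectors must be same length"
    return sum(v_i * w_i for v_i, w_i in zip(v, w))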
Example #17
    def forward(self, inp: Tensor) -> Tensor:
        # Save the input for the backward pass, then return one
        # affine output per neuron: dot(inp, w[o]) + b[o].
        self.inp = inp
        return [dot(inp, self.w[o]) + self.b[o]
                for o in range(self.output_dim)]
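
A usage sketch (hedged: the constructor call follows dsfs's Linear(input_dim, output_dim) signature):

layer = Linear(input_dim=2, output_dim=1)
out = layer.forward([1.0, 0.5])   # one dot product per output neuron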
Example #18
def _negative_log_likelihood(x: Vector, y: float, beta: Vector) -> float:
    if y == 1:
        return -math.log(logistic(dot(x, beta)))
    else:
        return -math.log(1 - logistic(dot(x, beta)))
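
The full-dataset loss sums the per-example terms; in dsfs:

def negative_log_likelihood(xs: List[Vector], ys: List[float],
                            beta: Vector) -> float:
    return sum(_negative_log_likelihood(x, y, beta)
               for x, y in zip(xs, ys))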