Example #1
0
def learn(Y, R, num_features, regularization_lambda=0):
    """Learn collaborative-filtering factor matrices for the ratings in Y.

    :param Y: num_movies x num_users ratings matrix
    :param R: num_movies x num_users indicator matrix (same shape as Y)
    :param num_features: number of latent features to learn per movie/user
    :param regularization_lambda: regularization strength (default 0)
    :return: tuple (X, Theta) — optimized movie-feature and user-feature matrices
    """
    assert Y.shape == R.shape

    movie_count, user_count = Y.shape

    # Random starting points for both factor matrices.
    X = random_initialize((movie_count, num_features))
    Theta = random_initialize((user_count, num_features))

    # Record the shapes so the flat parameter vector can be unrolled again.
    shapes = [X.shape, Theta.shape]
    initial_params = utils.flatten_and_stack([X, Theta])[0]

    # jac=True: the objective returns (cost, gradient) as one pair.
    optimize_result = op.minimize(fun=cost_function_gradient,
                                  x0=initial_params,
                                  args=(shapes, Y, R, regularization_lambda),
                                  method="CG",
                                  jac=True)

    X, Theta = utils.roll(optimize_result.x, shapes)
    return X, Theta
Example #2
0
def neural_network_cost_gradient_unrolled(theta,
                                          X,
                                          Y,
                                          shapes,
                                          regularization_lambda=0):
    """Cost/gradient wrapper that works on a single flat parameter vector.

    ``theta`` is unrolled into weight matrices via ``shapes``, the regular
    cost/gradient routine is evaluated, and the gradient matrices are
    flattened back into one vector — the (cost, gradient) pair can then be
    fed straight to a scipy-style optimizer with ``jac=True``.
    """
    weight_matrices = utils.roll(theta, shapes)

    cost, gradient_matrices = neural_network_cost_gradient(
        X, Y, weight_matrices, regularization_lambda)

    flat_gradient = utils.flatten_and_stack(gradient_matrices)[0].reshape((-1))
    return cost, flat_gradient
Example #3
0
def cost_function_gradient(params, shapes, Y, R, regularization_lambda=0):
    """Collaborative-filtering cost and flattened gradient for an optimizer.

    ``params`` is unrolled into the two factor matrices (X and Theta) using
    ``shapes``; the gradients are flattened back into one vector so the
    returned (cost, gradient) pair suits scipy-style minimizers (jac=True).
    """
    assert len(shapes) == 2
    X, Theta = utils.roll(params, shapes)

    X_grad, Theta_grad = cost_function_derivative(X, Y, R, Theta,
                                                  regularization_lambda)
    cost = cost_function(X, Y, R, Theta, regularization_lambda)
    flat_gradient = utils.flatten_and_stack(
        [X_grad, Theta_grad])[0].reshape((-1))
    return cost, flat_gradient
Example #4
0
# Class labels are 1-indexed while argmax is 0-based, so shift predictions up.
predictions = predictions + 1

# Accuracy of the current predictions against the true labels y.
print(np.mean(predictions == y))

# Randomly initialize the two weight layers
# (shapes suggest 400 inputs -> 25 hidden -> 10 outputs — TODO confirm).
theta_0 = nn.initialize_random_theta((25, 400))
theta_1 = nn.initialize_random_theta((10, 25))

# Flatten both weight matrices into one vector; shapes are kept for unrolling.
(theta_vec, shapes) = utils.flatten_and_stack([theta_0, theta_1])

# Train with conjugate gradients; jac=True means the objective returns a
# (cost, gradient) pair. The final positional arg (1) is the regularization
# lambda. Capped at 100 iterations with progress output.
result = op.minimize(fun=nn.neural_network_cost_gradient_unrolled,
                     x0=theta_vec.reshape((-1)),
                     args=(X, Y, shapes, 1),
                     method="CG",
                     jac=True,
                     options={
                         "maxiter": 100,
                         "disp": True
                     })

print(result)

# Unroll the optimized flat vector back into per-layer weight matrices.
Thetas = utils.roll(result.x, shapes)

# Forward-propagate the training set through the trained network.
Output = nn.feed_forward(X, Thetas)

# Pick the most probable class per example, as a column vector.
predictions = np.argmax(Output, axis=1).reshape((X.shape[0], 1))

# Shift back to 1-indexed labels before comparing with y.
predictions = predictions + 1

# Post-training accuracy.
print(np.mean(predictions == y))
Example #5
0
def neural_network_cost_unrolled(X, Y, theta, shapes, regularization_lambda=0):
    """Evaluate the network cost from a flat parameter vector.

    ``theta`` is reshaped into weight matrices according to ``shapes`` and
    handed on to :func:`neural_network_cost`.
    """
    return neural_network_cost(X, Y, utils.roll(theta, shapes),
                               regularization_lambda)
Example #6
0
 def cost_function(theta):
     """Scalar collaborative-filtering cost of the flat vector ``theta``.

     Closure: ``shapes``, ``Y``, ``R`` and ``regularization_lambda`` come
     from the enclosing scope.
     """
     matrices = utils.roll(theta, shapes)
     return cofi.cost_function(matrices[0], Y, R, matrices[1],
                               regularization_lambda)