Example #1
def first_principal_component_sgd(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = maximize_stochastic(
        lambda x, _, w: directional_variance_i(x, w),
        lambda x, _, w: directional_variance_gradient_i(x, w),
        X,
        [None for _ in X],  # fake "y": one None per data row
        guess)
    return direction(unscaled_maximizer)
def first_principal_component_stochastic(matrix):
    guess = [1 for _ in matrix[0]]
    unscaled_maximizer = maximize_stochastic(
        lambda x, _, vector: directional_variance_row(x, vector),
        lambda x, _, vector: directional_variance_gradient_row(x, vector),
        matrix,
        [None for _ in matrix],  # fake "y": one None per data row
        guess)
    return direction(unscaled_maximizer)
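Both variants above lean on helpers defined elsewhere in the surrounding codebase. The sketch below shows plausible definitions, inferred from how the lambdas are invoked (a data row, an ignored y, and a weight vector); the bodies of dot, direction, directional_variance_i, directional_variance_gradient_i, and the maximize_stochastic driver are assumptions on my part, not confirmed source, and the row-suffixed names in the second variant are presumably the same functions under aliases.

import math
import random

def dot(v, w):
    """Sum of componentwise products of two equal-length vectors."""
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def direction(w):
    """Rescale w to unit length."""
    mag = math.sqrt(dot(w, w))
    return [w_i / mag for w_i in w]

def directional_variance_i(x_i, w):
    """Squared projection of row x_i onto the direction of w."""
    return dot(x_i, direction(w)) ** 2

def directional_variance_gradient_i(x_i, w):
    """Gradient of the term above with respect to w."""
    projection_length = dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]

# presumably just aliases for the row-named variant:
directional_variance_row = directional_variance_i
directional_variance_gradient_row = directional_variance_gradient_i

def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    """Stochastic gradient ascent (a minimal sketch): step along each
    per-point gradient, shrinking the step size whenever a full pass
    fails to improve on the best value seen so far."""
    theta, alpha = theta_0, alpha_0
    best_theta, best_value = theta_0, float("-inf")
    stalled_passes = 0
    while stalled_passes < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in zip(x, y))
        if value > best_value:
            # progress: remember it and reset the step size
            best_theta, best_value = theta, value
            stalled_passes, alpha = 0, alpha_0
        else:
            stalled_passes += 1
            alpha *= 0.9
        order = list(range(len(x)))
        random.shuffle(order)  # visit the data points in random order
        for i in order:
            gradient_i = gradient_fn(x[i], y[i], theta)
            theta = [t + alpha * g for t, g in zip(theta, gradient_i)]
    return best_theta

With these in place, a call such as first_principal_component_sgd([[1.0, 0.0], [2.0, 0.1], [3.0, -0.1]]) should return a unit vector close to [1.0, 0.0], the dominant direction of that small matrix.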
Example #4
    # want to maximize log likelihood on the training data
    fn = partial(logistic_log_likelihood, x_train, y_train)
    gradient_fn = partial(logistic_log_gradient, x_train, y_train)

    # pick a starting point (fixed here; a random start works too)
    beta_0 = [1, 1, 1]

    # and maximize using batch gradient ascent
    beta_hat = maximize_batch(fn, gradient_fn, beta_0)

    print("beta_batch", beta_hat)

    beta_0 = [1, 1, 1]
    beta_hat = maximize_stochastic(logistic_log_likelihood_i,
                                   logistic_log_gradient_i,
                                   x_train, y_train, beta_0)

    print("beta stochastic", beta_hat)

    true_positives = false_positives = true_negatives = false_negatives = 0

    for x_i, y_i in zip(x_test, y_test):
        predict = logistic(dot(beta_hat, x_i))

        if y_i == 1 and predict >= 0.5:  # TP: paid and we predict paid
            true_positives += 1
        elif y_i == 1:                   # FN: paid and we predict unpaid
            false_negatives += 1
        elif predict >= 0.5:             # FP: unpaid and we predict paid
            false_positives += 1
        else:                            # TN: unpaid and we predict unpaid
            true_negatives += 1
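Once all four confusion-matrix counts are tallied, precision and recall follow directly. This closing sketch is an assumption, not part of the original snippet; Python 3 true division is assumed.

    # hypothetical follow-up: summarize the counts as precision and recall
    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)

    print("precision", precision)
    print("recall", recall)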