Example No. 1
def first_principal_component(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = maximize_batch(
        partial(directional_variance, X),  # is now a function of w
        partial(directional_variance_gradient, X),  # is now a function of w
        guess)
    return direction(unscaled_maximizer)
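
These snippets all lean on a handful of helpers (dot, magnitude, direction, directional_variance, directional_variance_gradient) that the examples themselves don't show. Below is a minimal sketch of plausible definitions, following the conventions of Joel Grus's Data Science from Scratch, from which most of these examples appear to derive; the exact implementations in each repository may differ:

import math

def dot(v, w):
    """sum of the componentwise products of v and w"""
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def magnitude(v):
    return math.sqrt(dot(v, v))

def direction(w):
    """rescale w to a unit vector"""
    mag = magnitude(w)
    return [w_i / mag for w_i in w]

def directional_variance_i(x_i, w):
    """the variance of the single row x_i in the direction of w"""
    return dot(x_i, direction(w)) ** 2

def directional_variance(X, w):
    """the variance of the whole dataset in the direction of w"""
    return sum(directional_variance_i(x_i, w) for x_i in X)

def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i to the gradient of directional variance"""
    projection_length = dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]

def directional_variance_gradient(X, w):
    # componentwise sum of the per-row gradients
    return [sum(partials) for partials in
            zip(*[directional_variance_gradient_i(x_i, w) for x_i in X])]
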
Example No. 2
def first_principal_component(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = maximize_batch(
        partial(directional_variance, X),           # is now a function of w
        partial(directional_variance_gradient, X),  # is now a function of w
        guess)
    return direction(unscaled_maximizer)
Example No. 3
def first_principal_component(matrix):
    guess = [1 for _ in matrix[0]]
    unscaled_maximizer = maximize_batch(
        partial(directional_variance, matrix),
        partial(directional_variance_gradient, matrix),
        guess
    )
    return direction(unscaled_maximizer)
Example No. 4
def first_principal_component_sgd(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = maximize_stochastic(
        lambda x, _, w: directional_variance_i(x, w),
        lambda x, _, w: directional_variance_gradient_i(x, w),
        X,
        [None for _ in X],  # the fake "y", ignored by the lambdas
        guess)
    return direction(unscaled_maximizer)
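
The stochastic variant above needs a maximize_stochastic that takes per-point objective and gradient functions. A self-contained sketch in the spirit of the book's version (the bound of 100 non-improving passes and the 0.9 step-size decay are that version's conventions, not requirements):

import random

def in_random_order(data):
    """generate the elements of data in a random order"""
    indexes = list(range(len(data)))
    random.shuffle(indexes)
    for i in indexes:
        yield data[i]

def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    """gradient ascent one data point at a time; shrink the step size
    after each full pass that fails to improve, give up after 100"""
    data = list(zip(x, y))
    theta, alpha = theta_0, alpha_0
    max_theta, max_value = None, float('-inf')
    iterations_with_no_improvement = 0
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)
        if value > max_value:
            max_theta, max_value = theta, value   # new best: reset the step size
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            iterations_with_no_improvement += 1   # no improvement: shrink the step
            alpha *= 0.9
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = [t + alpha * g for t, g in zip(theta, gradient_i)]
    return max_theta
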
Example No. 5
def first_principal_component(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = maximize_batch(
        functools.partial(directional_variance, X),
        functools.partial(directional_variance_gradient, X),
        guess)
    print(unscaled_maximizer)
    print("after unscaled unscaled_maximizer")
    return direction(unscaled_maximizer)
Example No. 6
 def test_maximize_batch(self):
     # this parabola is maximized at [-1, -1]
     optimized = gradient_descent.maximize_batch(
         lambda v: - ((v[0]+1) ** 2 + (v[1]+1) ** 2),  # f(x,y)= - ((x+1)**2 + (y+1)**2)
         lambda v: scalar_multiply(-2, vector_add(v, [1, 1])),  # f'(x,y) = [-2(x+1) -2(y+1)]
         [3, 2],
         tolerance=0.000001
     )
     for index, value in enumerate(optimized):
         self.assertAlmostEqual(-1, value, places=2,
                                msg='Value {0} not optimized to -1 for dimension of index: {1}'.format(value, index))
Example No. 7
 def test_maximize_batch(self):
     # this parabola is maximized at [-1, -1]
     optimized = gradient_descent.maximize_batch(
         lambda v: -((v[0] + 1)**2 +
                     (v[1] + 1)**2),  # f(x,y)= - ((x+1)**2 + (y+1)**2)
         lambda v: scalar_multiply(-2, vector_add(v, [1, 1])
                                   ),  # f'(x,y) = [-2(x+1) -2(y+1)]
         [3, 2],
         tolerance=0.000001)
     for index, value in enumerate(optimized):
          self.assertAlmostEqual(
             -1,
             value,
             places=2,
             msg='Value {0} not optimized to -1 for dimension of index: {1}'
             .format(value, index))
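
Both tests drive maximize_batch toward the peak of a concave paraboloid. In the book this function is just gradient descent on the negated objective; a minimal self-contained sketch consistent with the calls above (the fixed menu of step sizes is one common choice, not the only one):

def negate(f):
    """return a function that computes -f(...)"""
    return lambda *args: -f(*args)

def negate_all(f):
    """the same for an f that returns a list of numbers"""
    return lambda *args: [-y for y in f(*args)]

def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
    """use gradient descent to find a theta that minimizes target_fn"""
    step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
    theta = theta_0
    value = target_fn(theta)
    while True:
        gradient = gradient_fn(theta)
        # try each step size and keep the candidate with the smallest value
        next_thetas = [[t - step * g for t, g in zip(theta, gradient)]
                       for step in step_sizes]
        next_theta = min(next_thetas, key=target_fn)
        next_value = target_fn(next_theta)
        if abs(value - next_value) < tolerance:
            return theta
        theta, value = next_theta, next_value

def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
    return minimize_batch(negate(target_fn), negate_all(gradient_fn),
                          theta_0, tolerance)
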
Example No. 8
def first_principal_component(matrix):
    guess = [1 for _ in matrix[0]]
    unscaled_maximizer = maximize_batch(
        partial(directional_variance, matrix),
        partial(directional_variance_gradient, matrix), guess)
    return direction(unscaled_maximizer)
Example No. 9
    print(beta)

    print "logistic regression:"

    random.seed(0)
    x_train, x_test, y_train, y_test = train_test_split(rescaled_x, y, 0.33)

    # want to maximize log likelihood on the training data
    fn = partial(logistic_log_likelihood, x_train, y_train)
    gradient_fn = partial(logistic_log_gradient, x_train, y_train)

    # pick a random starting point
    beta_0 = [1, 1, 1]

    # and maximize using gradient descent
    beta_hat = maximize_batch(fn, gradient_fn, beta_0)

    print "beta_batch", beta_hat

    beta_0 = [1, 1, 1]
    beta_hat = maximize_stochastic(logistic_log_likelihood_i,
                                   logistic_log_gradient_i, x_train, y_train,
                                   beta_0)

    print "beta stochastic", beta_hat

    true_positives = false_positives = true_negatives = false_negatives = 0

    for x_i, y_i in zip(x_test, y_test):
        predict = logistic(dot(beta_hat, x_i))
Example No. 10
def first_prin_com(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = sgd.maximize_batch(partial(directional_var, X),
                                            partial(dir_var_grad, X), guess)
    return direction(unscaled_maximizer)
Example No. 11
def first_principal_component(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = maximize_batch(
        partial(directional_variance, X),
        partial(directional_variance_gradient, X), guess)
    return direction(unscaled_maximizer)
Example No. 12
data = [
    (4.2, 78000, 0),
    (1.1, 54000, 0),
    (6.2, 60000, 0),
    (2.9, 59000, 0),
    (2.1, 52000, 0),
    (8.2, 87000, 0),
    (4.8, 73000, 0),
    (2.2, 42000, 1),
    (9.1, 98000, 0),
    (6.5, 84000, 0),
    (6.9, 73000, 0),
    (5.1, 72000, 0),
    (9.1, 69000, 1),
    (9.8, 79000, 1),
]
data = list(map(list, data))  # change tuples to lists

x = [[1] + row[:2] for row in data]
y = [row[2] for row in data]

rescaled_x = sd.rescale(x)

x_train, y_train, x_test, y_test = train_test_split(rescaled_x, y, 0.33)
fn = partial(logistic_log_likelihood, x_train, y_train)
grad_fn = partial(logistic_gradient, x_train, y_train)

beta_0 = [random.random() for _ in range(3)]

beta_hat = gd.maximize_batch(fn, grad_fn, beta_0)
print(beta_hat)
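
Example No. 12 calls sd.rescale before fitting, since gradient methods behave badly when columns live on wildly different scales (years of experience vs. salary here). A plausible sketch of such a rescaling helper, assuming the usual mean-0 / standard-deviation-1 convention (the repository's own sd.rescale may differ in detail):

import statistics

def rescale(xs):
    """rescale each column of xs to mean 0 and standard deviation 1,
    leaving zero-variance columns (like the all-ones bias column) alone"""
    columns = list(zip(*xs))
    means = [statistics.mean(col) for col in columns]
    stdevs = [statistics.stdev(col) for col in columns]
    return [[(x - m) / s if s > 0 else x
             for x, m, s in zip(row, means, stdevs)]
            for row in xs]
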
Example No. 13
    print(beta)

    print("logistic regression:")

    random.seed(0)
    x_train, x_test, y_train, y_test = train_test_split(rescaled_x, y, 0.33)

    # want to maximize log likelihood on the training data
    fn = partial(logistic_log_likelihood, x_train, y_train)
    gradient_fn = partial(logistic_log_gradient, x_train, y_train)

    # pick a random starting point
    beta_0 = [1, 1, 1]

    # and maximize using gradient descent
    beta_hat = maximize_batch(fn, gradient_fn, beta_0)

    print("beta_batch", beta_hat)

    beta_0 = [1, 1, 1]
    beta_hat = maximize_stochastic(logistic_log_likelihood_i,
                                   logistic_log_gradient_i,
                                   x_train, y_train, beta_0)

    print("beta stochastic", beta_hat)

    true_positives = false_positives = true_negatives = false_negatives = 0

    for x_i, y_i in zip(x_test, y_test):
        predict = logistic(dot(beta_hat, x_i))
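
Examples No. 9 and No. 13 break off just after computing predict for each test point, and the logistic helpers they call are not shown. A minimal sketch of plausible definitions, following the book's conventions (the per-repository versions may differ):

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def logistic(x):
    return 1.0 / (1 + math.exp(-x))

def logistic_log_likelihood_i(x_i, y_i, beta):
    """log-likelihood of one labeled point under coefficients beta"""
    if y_i == 1:
        return math.log(logistic(dot(beta, x_i)))
    else:
        return math.log(1 - logistic(dot(beta, x_i)))

def logistic_log_likelihood(x, y, beta):
    return sum(logistic_log_likelihood_i(x_i, y_i, beta)
               for x_i, y_i in zip(x, y))

def logistic_log_gradient_i(x_i, y_i, beta):
    """gradient of the log-likelihood at one labeled point"""
    return [(y_i - logistic(dot(beta, x_i))) * x_ij for x_ij in x_i]

def logistic_log_gradient(x, y, beta):
    return [sum(partials) for partials in
            zip(*[logistic_log_gradient_i(x_i, y_i, beta)
                  for x_i, y_i in zip(x, y)])]
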
Example No. 14
def first_principal_component(X):
    guess = [1 for _ in X[0]]
    unscaled_maximizer = maximize_batch(
        partial(directional_variance, X),
        partial(directional_variance_gradient, X),
        guess)
    return direction(unscaled_maximizer)
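
As a hypothetical usage sketch (the data here is invented for illustration): points lying roughly along the line y = 2x should yield a first component close to [1, 2] normalized, i.e. roughly [0.45, 0.89], assuming the helper definitions sketched under Example No. 1:

from functools import partial

toy = [[1.0, 2.0], [3.0, 6.1], [-2.0, -3.9], [0.5, 1.2]]
component = first_principal_component(toy)
print(component)  # expected to be approximately [0.45, 0.89]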