Example #1
import random
from functools import partial
import gradient  # module providing minimize_stochastic (assumed importable)

def estimate_beta_ridge(x, y, alpha):
    """Use gradient descent to fit a ridge regression with penalty alpha."""
    # Random starting coefficients, one per feature column of x.
    beta_initial = [random.random() for _ in x[0]]
    # partial binds the penalty strength alpha into both the loss and its gradient.
    return gradient.minimize_stochastic(
        partial(squared_error_ridge, alpha=alpha),
        partial(squared_error_gradient, alpha=alpha), x, y, beta_initial,
        0.001)
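
The two partial calls above assume that squared_error_ridge and squared_error_gradient both accept an alpha keyword so the penalty strength can be bound in. Those helpers are not part of this listing; the following is only a sketch of what they might look like, where the dot helper, the exact penalty form, and the alpha=0.0 default (chosen so the same gradient also works unpenalized in Example #2) are all assumptions.

def dot(v, w):
    """Sum of elementwise products (assumed helper)."""
    return sum(v_j * w_j for v_j, w_j in zip(v, w))

def squared_error_ridge(x_i, y_i, beta, alpha):
    """Squared prediction error plus an L2 penalty on every coefficient except the intercept."""
    error = y_i - dot(x_i, beta)
    return error ** 2 + alpha * dot(beta[1:], beta[1:])

def squared_error_gradient(x_i, y_i, beta, alpha=0.0):
    """Gradient of the penalized squared error; alpha=0.0 reduces it to plain least squares."""
    error = y_i - dot(x_i, beta)
    return [-2 * x_ij * error + (2 * alpha * beta_j if j > 0 else 0.0)
            for j, (x_ij, beta_j) in enumerate(zip(x_i, beta))]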
Example #2
import random
import gradient  # module providing minimize_stochastic (assumed importable)

def estimate_beta(x, y):
    """Find the optimal beta using stochastic gradient descent."""
    # Random starting coefficients, one per feature column of x.
    beta_initial = [random.random() for _ in x[0]]
    return gradient.minimize_stochastic(squared_error,
                                        squared_error_gradient,
                                        x,
                                        y,
                                        beta_initial,
                                        0.001)
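
estimate_beta additionally relies on a plain squared_error loss and on gradient.minimize_stochastic, neither of which appears in this listing. The sketch below shows one plausible shape for them, written as standalone functions rather than as a gradient module, with a deliberately simplified fixed-step loop; the fixed pass count and the unused target_fn are assumptions made only to keep the sketch short.

import random

def squared_error(x_i, y_i, beta):
    """Squared difference between y_i and the linear prediction of x_i under beta."""
    prediction = sum(x_ij * beta_j for x_ij, beta_j in zip(x_i, beta))
    return (y_i - prediction) ** 2

def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    """Simplified SGD: one gradient step per data point, a fixed number of passes.

    target_fn is accepted for signature compatibility but not used here; a fuller
    version would evaluate it to shrink the step size when progress stalls.
    """
    theta = list(theta_0)
    for _ in range(1000):              # fixed number of passes over the data (assumption)
        indexes = list(range(len(x)))
        random.shuffle(indexes)        # visit the points in a fresh random order each pass
        for i in indexes:
            gradient_i = gradient_fn(x[i], y[i], theta)
            theta = [theta_j - alpha_0 * g_j for theta_j, g_j in zip(theta, gradient_i)]
    return theta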
Example #3
import random
from functools import partial
import gradient  # module providing minimize_stochastic (assumed importable)

def estimate_beta_ridge(x, y, alpha):
    """Use gradient descent to fit a ridge regression with penalty alpha."""
    # Random starting coefficients, one per feature column of x.
    beta_initial = [random.random() for _ in x[0]]
    # Same estimator as Example #1, with the call arguments wrapped one per line.
    return gradient.minimize_stochastic(partial(squared_error_ridge,
                                                alpha=alpha),
                                        partial(squared_error_gradient,
                                                alpha=alpha),
                                        x,
                                        y,
                                        beta_initial,
                                        0.001)
Example #4
import random
import gradient  # module providing minimize_stochastic (assumed importable)

def estimate_beta(x, y):
    """Find the optimal beta using stochastic gradient descent."""
    # Same estimator as Example #2, with the call written on two lines.
    beta_initial = [random.random() for _ in x[0]]
    return gradient.minimize_stochastic(squared_error, squared_error_gradient,
                                        x, y, beta_initial, 0.001)
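
Hypothetical usage, assuming the estimators above and the sketched helpers are all in scope. The toy x and y, the alpha value, and the commented expectations are illustrative assumptions; the directional effect is the defining one for ridge regression: a larger alpha pulls the non-intercept coefficients closer to zero than the unpenalized fit.

# Toy data: each row of x carries a leading constant 1 for the intercept term.
x = [[1.0, float(i)] for i in range(1, 11)]
y = [1.0 + 2.0 * row[1] for row in x]

beta_ols = estimate_beta(x, y)                     # should approach the generating [1.0, 2.0]
beta_ridge = estimate_beta_ridge(x, y, alpha=1.0)  # slope shrunk toward 0 relative to beta_ols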