def estimate_beta_ridge(x, y, alpha, learning_rate=0.001):
    """Fit a ridge regression with penalty `alpha` via stochastic gradient descent.

    NOTE(review): an identical definition of this function appears again later
    in this file; at import time the later one shadows this one — confirm which
    copy is intended and remove the other.

    Args:
        x: list of input rows; each row is a list of feature values
           (x[0] is used only to size the initial beta vector).
        y: list of target values, parallel to x.
        alpha: ridge (L2) penalty strength, forwarded to the error/gradient fns.
        learning_rate: initial SGD step size (default 0.001, the original
            hard-coded value, so existing callers are unaffected).

    Returns:
        The beta vector found by gradient.minimize_stochastic.
    """
    # One random starting coefficient per feature column; `_` because the
    # row element itself is never used.
    beta_initial = [random.random() for _ in x[0]]
    return gradient.minimize_stochastic(
        # Bind alpha so the minimizer sees plain (x_i, y_i, beta) callables.
        partial(squared_error_ridge, alpha=alpha),
        partial(squared_error_gradient, alpha=alpha),
        x, y,
        beta_initial,
        learning_rate)
def estimate_beta(x, y, learning_rate=0.001):
    """Find the least-squares beta using stochastic gradient descent.

    Args:
        x: list of input rows; each row is a list of feature values
           (x[0] is used only to size the initial beta vector).
        y: list of target values, parallel to x.
        learning_rate: initial SGD step size (default 0.001, the original
            hard-coded value, so existing callers are unaffected).

    Returns:
        The beta vector found by gradient.minimize_stochastic.
    """
    # One random starting coefficient per feature column; `_` because the
    # row element itself is never used.
    beta_initial = [random.random() for _ in x[0]]
    return gradient.minimize_stochastic(
        squared_error,
        squared_error_gradient,
        x, y,
        beta_initial,
        learning_rate)
def estimate_beta_ridge(x, y, alpha, learning_rate=0.001):
    """Fit a ridge regression with penalty `alpha` via stochastic gradient descent.

    NOTE(review): this is a byte-for-byte duplicate of an earlier definition in
    this file; being later, this copy is the one in effect at runtime — confirm
    which copy is intended and remove the other.

    Args:
        x: list of input rows; each row is a list of feature values
           (x[0] is used only to size the initial beta vector).
        y: list of target values, parallel to x.
        alpha: ridge (L2) penalty strength, forwarded to the error/gradient fns.
        learning_rate: initial SGD step size (default 0.001, the original
            hard-coded value, so existing callers are unaffected).

    Returns:
        The beta vector found by gradient.minimize_stochastic.
    """
    # One random starting coefficient per feature column; `_` because the
    # row element itself is never used.
    beta_initial = [random.random() for _ in x[0]]
    return gradient.minimize_stochastic(
        # Bind alpha so the minimizer sees plain (x_i, y_i, beta) callables.
        partial(squared_error_ridge, alpha=alpha),
        partial(squared_error_gradient, alpha=alpha),
        x, y,
        beta_initial,
        learning_rate)