def bench(n_lambdas=100, eps=1e-4):
    lambda_range = lambda_min, lambda_max
    # Default grid: n_lambdas values geometrically spaced from lambda_max
    # down to lambda_min.
    default_lambdas = lambda_max * \
        (lambda_min / lambda_max) ** (np.arange(n_lambdas) / (n_lambdas - 1.))

    tic = time.time()
    eps_ = eps * np.linalg.norm(y) ** 2
    model = enet_solvers(X, y, default_lambdas, None, mu, eps_)
    tac = time.time() - tic
    times[0, i_grid] = tac

    betas = model[1]
    gaps = model[2]
    n_grids[0, i_grid] = default_lambdas.shape[0]
    max_gaps[0, i_grid] = np.max(gaps)

    eps_path = error_grid(X, y, betas, gaps, default_lambdas, mu, nu)
    xi = eps_path
    errors_path[0, i_grid] = xi
    print("\n time default grid = ", tac, "error default = ", xi,
          "max_gap = ", np.max(gaps))

    for i_method, method in enumerate(grid_names[1:], start=1):
        if method == "Uniform unilateral":
            adaptive = False
            large_step = False
        elif method == "Uniform bilateral":
            adaptive = False
            large_step = True
        elif method == "Adaptive unilateral":
            adaptive = True
            large_step = False
        else:  # method == "Adaptive bilateral"
            adaptive = True
            large_step = True

        tic = time.time()
        lambdas, gaps, betas = compute_path(X, y, xi, mu, nu, lambda_range,
                                            adaptive, large_step, tau)
        tac = time.time() - tic
        times[i_method, i_grid] = tac
        n_grids[i_method, i_grid] = lambdas.shape[0]
        max_gaps[i_method, i_grid] = np.max(gaps)
        error = error_grid(X, y, betas, gaps, lambdas, mu, nu)
        errors_path[i_method, i_grid] = error
        print("time = ", tac, "error = ", errors_path[i_method, i_grid],
              "n_grid = ", lambdas.shape[0], "max_gap = ", np.max(gaps))
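
# A small, self-contained check (illustrative values only) that the default
# grid above is the usual geometric spacing from lambda_max down to
# lambda_min, i.e. the same points as np.geomspace. The *_demo names below
# are assumptions for this sketch, not variables from the script.
import numpy as np

lambda_max_demo, lambda_min_demo, n_demo = 1.0, 1e-3, 100
grid_formula = lambda_max_demo * \
    (lambda_min_demo / lambda_max_demo) ** (np.arange(n_demo) / (n_demo - 1.))
assert np.allclose(grid_formula,
                   np.geomspace(lambda_max_demo, lambda_min_demo, n_demo))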
lars_aug_lambdas, lars_aug_betas = augmented_lars(lars_betas, lars_lambdas, 10)
lars_aug_n_lambdas = lars_aug_lambdas.shape[0]
lars_aug_obj = np.empty(lars_aug_n_lambdas)
for t in range(lars_aug_n_lambdas):
    # LARS lambdas are rescaled by n_samples before evaluating the objective.
    lars_aug_obj[t] = lasso_objective(X, y, lars_aug_betas[:, t],
                                      lars_aug_lambdas[t] * n_samples)

print("Computing approximation of the regularization path")
mu = 0.
nu = 1.
tau = 2.
xi = np.linalg.norm(y) ** 2 / 20.
lambda_range = lambda_min, lambda_max
approx_lambdas, approx_gaps, approx_betas = \
    compute_path(X, y, xi, mu, nu, lambda_range, adaptive=True,
                 large_step=False, tau=tau)

approx_n_lambdas = approx_lambdas.shape[0]
# Evaluate the objective at each grid point, and also at the next grid point
# while keeping the current beta.
approx_obj = np.empty(2 * approx_n_lambdas - 1)
approx_aug_lambdas = np.empty(2 * approx_n_lambdas - 1)
for t in np.arange(approx_n_lambdas):
    approx_obj[2 * t] = lasso_objective(X, y, approx_betas[:, t],
                                        approx_lambdas[t])
    if t == approx_n_lambdas - 1:
        break
    approx_obj[2 * t + 1] = lasso_objective(X, y, approx_betas[:, t],
                                            approx_lambdas[t + 1])

for t in np.arange(approx_n_lambdas):
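
# The snippet above relies on a lasso_objective helper defined elsewhere in
# the repository. A minimal sketch consistent with how it is called here
# (primal Lasso value at a given beta and lambda); the exact scaling used by
# the original helper may differ, so treat this as an assumption:
import numpy as np

def lasso_objective_sketch(X, y, beta, lmbd):
    """0.5 * ||y - X @ beta||_2^2 + lmbd * ||beta||_1 (assumed form)."""
    residual = y - X.dot(beta)
    return 0.5 * residual.dot(residual) + lmbd * np.abs(beta).sum()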
eps_min = delta_eps / 10.
eps_max = delta_eps * 10.
eps_vals = np.geomspace(eps_min, eps_max, n_eval)
selected_errors = np.empty(n_eval)
selected_lambdas = np.empty(n_eval)

for i_eps_val, eps_val in enumerate(eps_vals):
    nu = 1. / mu
    # Duality-gap tolerance derived from the target prediction accuracy
    # eps_val on the test set.
    xi = 0.5 * mu * (eps_val / norm_X_test) ** 2
    print("Sequential computation of the path with")
    tic = time.time()
    path_lambdas, path_gaps, path_betas = \
        compute_path(X_train, y_train, xi, mu, nu, lambda_range,
                     adaptive=True, large_step=True)
    tac = time.time() - tic

    # Prediction error of each path point on the held-out test set.
    path_approx_errors = np.empty(path_lambdas.shape[0])
    for i_lmd, lambda_ in enumerate(path_lambdas):
        path_approx_errors[i_lmd] = \
            np.linalg.norm(X_test.dot(path_betas[:, i_lmd]) - y_test)

    if i_eps_val <= 1:
        i_best = np.argmin(path_approx_errors)
    else:
        tmp = np.where(
            path_approx_errors - np.min(approx_errors) <= eps_val)[0]
        worst_best = np.max(path_approx_errors[tmp])
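
# Why xi = 0.5 * mu * (eps_val / norm_X_test) ** 2 above: assuming the
# training objective is mu-strongly convex and the duality gap at beta is at
# most xi, then ||beta - beta_star|| <= sqrt(2 * xi / mu), hence
# ||X_test @ (beta - beta_star)|| <= ||X_test||_2 * sqrt(2 * xi / mu) = eps_val
# when norm_X_test is the spectral norm of X_test. A quick numeric check of
# that algebra with illustrative values (assumptions, not script variables):
import numpy as np

mu_demo, eps_demo, norm_demo = 2.0, 0.1, 5.0
xi_demo = 0.5 * mu_demo * (eps_demo / norm_demo) ** 2
assert np.isclose(norm_demo * np.sqrt(2 * xi_demo / mu_demo), eps_demo)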