def make_nystrom_evaluation(x_train, U_centroids):
    gamma = compute_euristic_gamma(x_train)

    # Build step: W = K(centroids, centroids), then W^{-1/2} via SVD
    nystrom_build_start_time = time.time()
    basis_kernel_W = special_rbf_kernel(U_centroids, U_centroids, gamma)
    U, S, V = np.linalg.svd(basis_kernel_W)
    S = np.maximum(S, 1e-12)
    normalization_ = np.dot(U / np.sqrt(S), V)
    nystrom_build_stop_time = time.time()
    nystrom_build_time = nystrom_build_stop_time - nystrom_build_start_time

    n_sample = 5000  # TODO: use a larger sample to better show the time gain
    indexes_samples = np.random.permutation(x_train.shape[0])[:n_sample]
    sample = x_train[indexes_samples]
    real_kernel = special_rbf_kernel(sample, sample, gamma)

    # Inference step: embedding = K(sample, centroids) @ W^{-1/2}
    nystrom_inference_time_start = time.time()
    nystrom_embedding = special_rbf_kernel(U_centroids, sample, gamma).T @ normalization_
    nystrom_approx_kernel_value = nystrom_embedding @ nystrom_embedding.T
    nystrom_inference_time_stop = time.time()
    nystrom_inference_time = nystrom_inference_time_stop - nystrom_inference_time_start

    sampled_froebenius_norm = np.linalg.norm(nystrom_approx_kernel_value - real_kernel)

    nystrom_results = {
        "nystrom_build_time": nystrom_build_time,
        "nystrom_inference_time": nystrom_inference_time,
        "nystrom_sampled_error_reconstruction": sampled_froebenius_norm
    }
    resprinter.add(nystrom_results)
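
# ----------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original module): the
# Nystrom reconstruction performed above, rewritten with plain numpy and
# scikit-learn's rbf_kernel standing in for special_rbf_kernel. The function
# name, dataset, and sizes (50 landmarks, 100 query points) are illustrative
# assumptions only.
# ----------------------------------------------------------------------------
def _nystrom_reconstruction_demo():
    import numpy as np
    from sklearn.metrics.pairwise import rbf_kernel

    rng = np.random.RandomState(0)
    data = rng.rand(500, 20)
    landmarks = data[rng.permutation(data.shape[0])[:50]]   # plays the role of U_centroids
    sample = data[rng.permutation(data.shape[0])[:100]]     # evaluation subsample
    gamma = 1.0 / data.shape[1]

    # Build step: W = K(landmarks, landmarks), normalization_ = W^{-1/2} via SVD
    W = rbf_kernel(landmarks, landmarks, gamma=gamma)
    U, S, V = np.linalg.svd(W)
    normalization_ = np.dot(U / np.sqrt(np.maximum(S, 1e-12)), V)

    # Inference step: embedding = K(sample, landmarks) @ W^{-1/2};
    # the approximate kernel is embedding @ embedding.T
    embedding = rbf_kernel(sample, landmarks, gamma=gamma) @ normalization_
    approx = embedding @ embedding.T

    exact = rbf_kernel(sample, sample, gamma=gamma)
    return np.linalg.norm(approx - exact) / np.linalg.norm(exact)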
def setUp(self):
    self.n_features = 2000
    self.n_data = 100
    sparsity = 2
    self.sparse_data = create_sparse_factors(
        shape=(self.n_data * 2, self.n_features),
        n_factors=int(np.ceil(np.log2(min(self.n_data * 2, self.n_features)))),
        sparsity_level=sparsity)
    self.data = self.sparse_data.compute_product(return_array=True)
    self.data_verylittle = np.random.rand(*self.data.shape) * 1e2
    self.data_norm = get_squared_froebenius_norm_line_wise(self.data)
    self.sparse_data_norm = get_squared_froebenius_norm_line_wise(self.sparse_data)
    self.data_norm_verylittle = get_squared_froebenius_norm_line_wise(self.data_verylittle)
    self.gamma = compute_euristic_gamma(self.data)
    self.random_data = np.random.rand(self.n_data, self.n_features)
    self.mnist = mnist_dataset()["x_train"].astype(np.float64)
    self.fashionmnist = fashion_mnist_dataset()["x_train"].astype(np.float64)
    # self.caltech = caltech_dataset(28)["x_train"].astype(np.float64)

    self.pairs_data = {
        "notSparse": self.data[:self.n_data],
        "mnist": self.mnist[:self.n_data],
        # "caltech": self.caltech[:self.n_data],
        "fashmnist": self.fashionmnist[:self.n_data],
    }
    self.example_data = {
        "notSparse": self.data[self.n_data:self.n_data * 2],
        "mnist": self.mnist[self.n_data:self.n_data * 2],
        # "caltech": self.caltech[self.n_data:self.n_data * 2],
        "fashmnist": self.fashionmnist[self.n_data:self.n_data * 2]
    }
    self.norm_data = {
        "notSparse": row_norms(self.pairs_data["notSparse"], squared=True)[:, np.newaxis],
        "mnist": get_squared_froebenius_norm_line_wise(self.pairs_data["mnist"])[:, np.newaxis],
        # "caltech": row_norms(self.pairs_data["caltech"], squared=True)[:, np.newaxis],
        "fashmnist": row_norms(self.pairs_data["fashmnist"], squared=True)[:, np.newaxis],
    }
    self.norm_example_data = {
        "notSparse": row_norms(self.example_data["notSparse"], squared=True)[:, np.newaxis],
        "mnist": get_squared_froebenius_norm_line_wise(self.example_data["mnist"]),
        # "caltech": row_norms(self.example_data["caltech"], squared=True)[:, np.newaxis],
        "fashmnist": row_norms(self.example_data["fashmnist"], squared=True)[:, np.newaxis],
    }
    self.gamma_data = {
        "notSparse": compute_euristic_gamma(self.pairs_data["notSparse"]),
        "mnist": compute_euristic_gamma(self.pairs_data["mnist"]),
        # "caltech": compute_euristic_gamma(self.pairs_data["caltech"]),
        "fashmnist": compute_euristic_gamma(self.pairs_data["fashmnist"]),
    }
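
    # ------------------------------------------------------------------
    # Hedged sketch (added for illustration, not part of the original test
    # case): one way the fixtures above can be exercised, under the
    # assumption that special_rbf_kernel is meant to match sklearn's
    # rbf_kernel when the precomputed row norms are supplied. The test name
    # and tolerance are illustrative only.
    # ------------------------------------------------------------------
    def test_special_rbf_kernel_matches_sklearn(self):
        from sklearn.metrics.pairwise import rbf_kernel
        for name, data in self.pairs_data.items():
            gamma = self.gamma_data[name]
            expected = rbf_kernel(data, data, gamma=gamma)
            obtained = special_rbf_kernel(data, data, gamma,
                                          norm_X=self.norm_data[name],
                                          norm_Y=self.norm_data[name])
            np.testing.assert_allclose(obtained, expected, atol=1e-5)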
def make_nystrom_evaluation(x_train, U_centroids):
    """
    Evaluate Nystrom construction time and approximation precision.

    The approximation is evaluated on a subsample of size n_sample of the input data set.

    :param x_train: Input dataset as ndarray.
    :param U_centroids: The matrix of centroids as ndarray or SparseFactor object
    :param n_sample: The number of samples to take into account in the reconstruction (can't be too large)
    :return:
    """
    n_sample = paraman["--nystrom"]
    if n_sample > x_train.shape[0]:
        logger.warning("Batch size for nystrom evaluation is bigger than data size. {} > {}. Using "
                       "data size instead.".format(n_sample, x_train.shape[0]))
        n_sample = x_train.shape[0]
        paraman["--nystrom"] = n_sample

    # Compute the heuristic gamma as the mean euclidean distance between examples
    gamma = compute_euristic_gamma(x_train)
    log_memory_usage("Memory after euristic gamma computation in make_nystrom_evaluation")

    # precompute the centroids norm for later use (optimization)
    # centroids_norm = get_squared_froebenius_norm(landmarks)
    centroids_norm = None

    ## TIME: nystrom build time
    # nystrom build time is the Nystrom preparation time for later use.
    ## START
    nystrom_build_start_time = time.time()
    basis_kernel_W = special_rbf_kernel(U_centroids, U_centroids, gamma, centroids_norm, centroids_norm)
    log_memory_usage("Memory after K_11 computation in make_nystrom_evaluation")
    U, S, V = np.linalg.svd(basis_kernel_W)
    log_memory_usage("Memory after SVD computation in make_nystrom_evaluation")
    S = np.maximum(S, 1e-12)
    normalization_ = np.dot(U / np.sqrt(S), V)
    nystrom_build_stop_time = time.time()
    ## STOP
    nystrom_build_time = nystrom_build_stop_time - nystrom_build_start_time

    indexes_samples = np.random.permutation(x_train.shape[0])[:n_sample]
    sample = x_train[indexes_samples]
    log_memory_usage("Memory after sample selection in make_nystrom_evaluation")

    # samples_norm = np.linalg.norm(sample, axis=1) ** 2
    samples_norm = None
    real_kernel = special_rbf_kernel(sample, sample, gamma, samples_norm, samples_norm)
    log_memory_usage("Memory after real kernel computation in make_nystrom_evaluation")

    ## TIME: nystrom inference time
    # Nystrom inference time is the time of the Nystrom transformation for all the samples.
    ## START
    nystrom_inference_time_start = time.time()
    nystrom_embedding = special_rbf_kernel(U_centroids, sample, gamma, centroids_norm, samples_norm).T @ normalization_
    log_memory_usage("Memory after embedding computation in make_nystrom_evaluation")
    nystrom_approx_kernel_value = nystrom_embedding @ nystrom_embedding.T
    log_memory_usage("Memory after kernel matrix approximation in make_nystrom_evaluation")
    nystrom_inference_time_stop = time.time()
    ## STOP
    nystrom_inference_time = (nystrom_inference_time_stop - nystrom_inference_time_start) / n_sample

    sampled_froebenius_norm = np.linalg.norm(nystrom_approx_kernel_value - real_kernel)

    nystrom_results = {
        "nystrom_build_time": nystrom_build_time,
        "nystrom_inference_time": nystrom_inference_time,
        "nystrom_sampled_error_reconstruction": sampled_froebenius_norm
    }
    resprinter.add(nystrom_results)
                                             replace=False)
    else:
        train_indexes = np.arange(dataset["x_train"].shape[0])

    logger.info("Train size: {}".format(train_indexes.size))

    scaler = StandardScaler(with_std=False)
    data_train = dataset["x_train"][train_indexes]
    data_train = scaler.fit_transform(data_train)
    # pad with zero columns up to the next power of two (required by the fast transforms)
    deficit_dim_before_power_of_two = 2 ** next_power_of_two(data_train.shape[1]) - data_train.shape[1]
    data_train = np.pad(data_train, [(0, 0), (0, deficit_dim_before_power_of_two)], 'constant')

    gamma = compute_euristic_gamma(data_train)
    logger.info("Start Nyström reconstruction evaluation with {} samples".format(paraman["--nystrom"]))

    if "x_test" in dataset.keys():
        data_test = dataset["x_test"]
        data_test = scaler.transform(data_test)
        data_test = np.pad(data_test, [(0, 0), (0, deficit_dim_before_power_of_two)], 'constant')
        nystrom_eval = lambda landmarks: make_nystrom_evaluation(
            x_train=data_train,
            y_train=dataset["y_train"][train_indexes],
            x_test=data_test,
            y_test=dataset["y_test"],
        result = np.hstack([v[:, np.newaxis] * hadamard(v.size).T for v in self.seeds]).T
        return result


if __name__ == "__main__":
    x, y = datasets.load_digits(return_X_y=True)
    # x = np.pad(x, (0, 2), 'constant')
    nb_seeds = 5
    dim = 2
    data = x
    scaler = StandardScaler(with_std=False)
    data = scaler.fit_transform(data)
    gamma = compute_euristic_gamma(data)
    # data_norm = np.linalg.norm(x, axis=1)[:, np.newaxis]
    data_norm = None

    plt.scatter(data[:, 0], data[:, 1], color="b")
    seeds = data[np.random.permutation(data.shape[0])[:nb_seeds]]
    uop = UfastOperator(seeds, FWHT)
    uop_arr = uop.toarray()
    uniform_sample = data[np.random.permutation(data.shape[0])[:uop_arr.shape[0]]]
    uop_arr_norm = np.linalg.norm(uop_arr, axis=1)[:, np.newaxis]
    plt.scatter(seeds[:, 0], seeds[:, 1], marker="x", s=200, color='r')
    plt.scatter(uop_arr[:, 0], uop_arr[:, 1], color='g')
    # plt.show()

    real_kernel_value = rbf_kernel(x, gamma=gamma)
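
    # ------------------------------------------------------------------
    # Hedged sketch (added for illustration, not part of the original
    # script): compare the Nystrom approximation built from the
    # fast-operator landmarks `uop_arr` with one built from
    # `uniform_sample`. The helper below is a plain-numpy stand-in, not
    # the project's prepare_nystrom/nystrom_transformation functions.
    # ------------------------------------------------------------------
    def nystrom_error(landmarks, points, gamma):
        # W^{-1/2} via SVD of K(landmarks, landmarks)
        W = rbf_kernel(landmarks, landmarks, gamma=gamma)
        U, S, V = np.linalg.svd(W)
        normalization = np.dot(U / np.sqrt(np.maximum(S, 1e-12)), V)
        embedding = rbf_kernel(points, landmarks, gamma=gamma) @ normalization
        approx = embedding @ embedding.T
        exact = rbf_kernel(points, gamma=gamma)
        return np.linalg.norm(approx - exact) / np.linalg.norm(exact)

    print("Nystrom error (fast-operator landmarks):", nystrom_error(uop_arr, data, gamma))
    print("Nystrom error (uniform landmarks):", nystrom_error(uniform_sample, data, gamma))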
def make_nystrom_evaluation(x_train, y_train, x_test, y_test, U_centroids):
    """
    Evaluate Nystrom construction time and approximation precision.

    The approximation is evaluated on a subsample of size n_sample of the input data set.

    :param x_train: Input dataset as ndarray.
    :param U_centroids: The matrix of centroids as ndarray or SparseFactor object
    :param n_sample: The number of samples to take into account in the reconstruction (can't be too large)
    :return:
    """
    n_sample = paraman["--nystrom"]
    if n_sample > x_train.shape[0]:
        logger.warning("Batch size for nystrom evaluation is bigger than data size. {} > {}. Using "
                       "data size instead.".format(n_sample, x_train.shape[0]))
        n_sample = x_train.shape[0]
        paraman["--nystrom"] = n_sample

    # Compute the heuristic gamma as the mean euclidean distance between examples
    gamma = compute_euristic_gamma(x_train)
    log_memory_usage("Memory after euristic gamma computation in make_nystrom_evaluation")

    # precompute the centroids norm for later use (optimization)
    centroids_norm = get_squared_froebenius_norm_line_wise(U_centroids)[:, np.newaxis]
    # centroids_norm = None

    indexes_samples = np.random.permutation(x_train.shape[0])[:n_sample]
    sample = x_train[indexes_samples]
    samples_norm = None
    log_memory_usage("Memory after sample selection in make_nystrom_evaluation")

    ########################
    # Nystrom on centroids #
    ########################
    logger.info("Build Nystrom on centroids")

    ## TIME: nystrom build time
    # nystrom build time is the Nystrom preparation time for later use.
    ## START
    nystrom_build_start_time = time.process_time()
    metric = prepare_nystrom(U_centroids, centroids_norm, gamma=gamma)
    nystrom_build_stop_time = time.process_time()
    log_memory_usage("Memory after SVD computation in make_nystrom_evaluation")
    ## STOP
    nystrom_build_time = nystrom_build_stop_time - nystrom_build_start_time

    ## TIME: nystrom inference time
    # Nystrom inference time is the time of the Nystrom transformation for all the samples.
    ## START
    nystrom_inference_time_start = time.process_time()
    nystrom_embedding = nystrom_transformation(sample, U_centroids, metric, centroids_norm, samples_norm, gamma=gamma)
    nystrom_approx_kernel_value = nystrom_embedding @ nystrom_embedding.T
    nystrom_inference_time_stop = time.process_time()
    log_memory_usage("Memory after kernel matrix approximation in make_nystrom_evaluation")
    ## STOP
    nystrom_inference_time = (nystrom_inference_time_stop - nystrom_inference_time_start) / n_sample

    ################################################################

    ######################
    # Nystrom on uniform #
    ######################
    logger.info("Build Nystrom on uniform sampling")

    indexes_uniform_samples = np.random.permutation(x_train.shape[0])[:U_centroids.shape[0]]
    uniform_sample = x_train[indexes_uniform_samples]
    uniform_sample_norm = get_squared_froebenius_norm_line_wise(uniform_sample)[:, np.newaxis]
    log_memory_usage("Memory after uniform sample selection in make_nystrom_evaluation")

    metric_uniform = prepare_nystrom(uniform_sample, uniform_sample_norm, gamma=gamma)
    log_memory_usage("Memory after SVD computation in uniform part of make_nystrom_evaluation")

    nystrom_embedding_uniform = nystrom_transformation(sample, uniform_sample, metric_uniform, uniform_sample_norm, samples_norm, gamma=gamma)
    nystrom_approx_kernel_value_uniform = nystrom_embedding_uniform @ nystrom_embedding_uniform.T

    #################################################################

    ###############
    # Real Kernel #
    ###############
    logger.info("Compute real kernel matrix")

    real_kernel_special = special_rbf_kernel(sample, sample, gamma, norm_X=samples_norm, norm_Y=samples_norm)
    # real_kernel = rbf_kernel(sample, sample, gamma)
    real_kernel_norm = np.linalg.norm(real_kernel_special)
    log_memory_usage("Memory after real kernel computation in make_nystrom_evaluation")

    #################################
    # Sklearn based Nystrom uniform #
    #################################
    # sklearn_nystrom = Nystroem(gamma=gamma, n_components=uniform_sample.shape[0])
    # sklearn_nystrom = sklearn_nystrom.fit(uniform_sample)
    # sklearn_transfo = sklearn_nystrom.transform(sample)
    # kernel_sklearn_nys = sklearn_transfo @ sklearn_transfo.T

    ################################################################

    ####################
    # Error evaluation #
    ####################
    sampled_froebenius_norm = np.linalg.norm(nystrom_approx_kernel_value - real_kernel_special) / real_kernel_norm
    sampled_froebenius_norm_uniform = np.linalg.norm(nystrom_approx_kernel_value_uniform - real_kernel_special) / real_kernel_norm

    # svm evaluation
    if x_test is not None:
        logger.info("Start classification")
        time_classification_start = time.process_time()
        x_train_nystrom_embedding = nystrom_transformation(x_train, U_centroids, metric, centroids_norm, None, gamma=gamma)
        x_test_nystrom_embedding = nystrom_transformation(x_test, U_centroids, metric, centroids_norm, None, gamma=gamma)

        linear_svc_clf = LinearSVC(class_weight="balanced")
        linear_svc_clf.fit(x_train_nystrom_embedding, y_train)
        predictions = linear_svc_clf.predict(x_test_nystrom_embedding)
        time_classification_stop = time.process_time()

        if paraman["--kddcup04"]:
            # compute recall: nb_true_positive / nb_real_positive
            recall = np.sum(predictions[y_test == 1]) / np.sum(y_test[y_test == 1])
            # compute precision: nb_true_positive / nb_predicted_positive
            precision = np.sum(predictions[y_test == 1]) / np.sum(predictions[predictions == 1])
            f1 = 2 * precision * recall / (precision + recall)
            accuracy_nystrom_svm = f1
        else:
            accuracy_nystrom_svm = np.sum(predictions == y_test) / y_test.shape[0]

        delta_time_classification = time_classification_stop - time_classification_start
    else:
        accuracy_nystrom_svm = None
        delta_time_classification = None

    nystrom_results = {
        "nystrom_build_time": nystrom_build_time,
        "nystrom_inference_time": nystrom_inference_time,
        "nystrom_sampled_error_reconstruction": sampled_froebenius_norm,
        "nystrom_sampled_error_reconstruction_uniform": sampled_froebenius_norm_uniform,
        "nystrom_svm_accuracy": accuracy_nystrom_svm,
        "nystrom_svm_time": delta_time_classification
    }
    resprinter.add(nystrom_results)
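
# ----------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original module):
# the manual recall/precision/F1 computation in the kddcup04 branch above
# should coincide with sklearn.metrics.f1_score for binary 0/1 labels. The
# function name and the synthetic labels below are illustrative only.
# ----------------------------------------------------------------------------
def _check_manual_f1_against_sklearn():
    import numpy as np
    from sklearn.metrics import f1_score

    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=200)
    y_pred = rng.randint(0, 2, size=200)

    # same formulas as in the kddcup04 branch
    recall = np.sum(y_pred[y_true == 1]) / np.sum(y_true[y_true == 1])
    precision = np.sum(y_pred[y_true == 1]) / np.sum(y_pred[y_pred == 1])
    f1_manual = 2 * precision * recall / (precision + recall)

    assert np.isclose(f1_manual, f1_score(y_true, y_pred))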
def make_nystrom_evaluation(x_train, y_train, x_test, y_test, U_centroids):
    """
    Evaluate Nystrom construction time and approximation precision.

    The approximation is evaluated on a subsample of size n_sample of the input data set.

    :param x_train: Input dataset as ndarray.
    :param U_centroids: The matrix of centroids as ndarray or SparseFactor object
    :param n_sample: The number of samples to take into account in the reconstruction (can't be too large)
    :return:
    """

    def prepare_nystrom(landmarks, landmarks_norm):
        # K_11 = K(landmarks, landmarks); its inverse square root is obtained from the SVD
        basis_kernel_W = special_rbf_kernel(landmarks, landmarks, gamma, landmarks_norm, landmarks_norm)
        U, S, V = np.linalg.svd(basis_kernel_W)
        S = np.maximum(S, 1e-12)
        normalization_ = np.dot(U / np.sqrt(S), V)
        return normalization_

    def nystrom_transformation(x_input, landmarks, p_metric, landmarks_norm, x_input_norm):
        # Nystrom embedding: K(x_input, landmarks) @ K_11^{-1/2}
        nystrom_embedding = special_rbf_kernel(landmarks, x_input, gamma, landmarks_norm, x_input_norm).T @ p_metric
        return nystrom_embedding

    n_sample = paraman["--nystrom"]
    if n_sample > x_train.shape[0]:
        logger.warning("Batch size for nystrom evaluation is bigger than data size. {} > {}. Using "
                       "data size instead.".format(n_sample, x_train.shape[0]))
        n_sample = x_train.shape[0]
        paraman["--nystrom"] = n_sample

    # Compute the heuristic gamma as the mean euclidean distance between examples
    gamma = compute_euristic_gamma(x_train)
    log_memory_usage("Memory after euristic gamma computation in make_nystrom_evaluation")

    # precompute the centroids norm for later use (optimization)
    centroids_norm = get_squared_froebenius_norm_line_wise(U_centroids)
    # centroids_norm = None

    indexes_samples = np.random.permutation(x_train.shape[0])[:n_sample]
    sample = x_train[indexes_samples]
    samples_norm = None
    log_memory_usage("Memory after sample selection in make_nystrom_evaluation")

    ########################
    # Nystrom on centroids #
    ########################
    logger.info("Build Nystrom on centroids")

    ## TIME: nystrom build time
    # nystrom build time is the Nystrom preparation time for later use.
    ## START
    nystrom_build_start_time = time.process_time()
    metric = prepare_nystrom(U_centroids, centroids_norm)
    nystrom_build_stop_time = time.process_time()
    log_memory_usage("Memory after SVD computation in make_nystrom_evaluation")
    ## STOP
    nystrom_build_time = nystrom_build_stop_time - nystrom_build_start_time

    ## TIME: nystrom inference time
    # Nystrom inference time is the time of the Nystrom transformation for all the samples.
    ## START
    nystrom_inference_time_start = time.process_time()
    nystrom_embedding = nystrom_transformation(sample, U_centroids, metric, centroids_norm, samples_norm)
    nystrom_approx_kernel_value = nystrom_embedding @ nystrom_embedding.T
    nystrom_inference_time_stop = time.process_time()
    log_memory_usage("Memory after kernel matrix approximation in make_nystrom_evaluation")
    ## STOP
    nystrom_inference_time = (nystrom_inference_time_stop - nystrom_inference_time_start) / n_sample

    ################################################################

    ######################
    # Nystrom on uniform #
    ######################
    logger.info("Build Nystrom on uniform sampling")

    indexes_uniform_samples = np.random.permutation(x_train.shape[0])[:U_centroids.shape[0]]
    uniform_sample = x_train[indexes_uniform_samples]
    uniform_sample_norm = None
    log_memory_usage("Memory after uniform sample selection in make_nystrom_evaluation")

    metric_uniform = prepare_nystrom(uniform_sample, uniform_sample_norm)
    log_memory_usage("Memory after SVD computation in uniform part of make_nystrom_evaluation")

    nystrom_embedding_uniform = nystrom_transformation(sample, uniform_sample, metric_uniform, uniform_sample_norm, samples_norm)
    nystrom_approx_kernel_value_uniform = nystrom_embedding_uniform @ nystrom_embedding_uniform.T

    #################################################################

    ###############
    # Real Kernel #
    ###############
    logger.info("Compute real kernel matrix")

    real_kernel = special_rbf_kernel(sample, sample, gamma, samples_norm, samples_norm)
    real_kernel_norm = np.linalg.norm(real_kernel)
    log_memory_usage("Memory after real kernel computation in make_nystrom_evaluation")

    ################################################################

    ####################
    # Error evaluation #
    ####################
    sampled_froebenius_norm = np.linalg.norm(nystrom_approx_kernel_value - real_kernel) / real_kernel_norm
    sampled_froebenius_norm_uniform = np.linalg.norm(nystrom_approx_kernel_value_uniform - real_kernel) / real_kernel_norm

    # svm evaluation
    if x_test is not None:
        logger.info("Start classification")
        time_classification_start = time.process_time()
        x_train_nystrom_embedding = nystrom_transformation(x_train, U_centroids, metric, centroids_norm, None)
        x_test_nystrom_embedding = nystrom_transformation(x_test, U_centroids, metric, centroids_norm, None)

        linear_svc_clf = LinearSVC()
        linear_svc_clf.fit(x_train_nystrom_embedding, y_train)
        accuracy_nystrom_svm = linear_svc_clf.score(x_test_nystrom_embedding, y_test)
        time_classification_stop = time.process_time()
        delta_time_classification = time_classification_stop - time_classification_start
    else:
        accuracy_nystrom_svm = None
        delta_time_classification = None

    nystrom_results = {
        "nystrom_build_time": nystrom_build_time,
        "nystrom_inference_time": nystrom_inference_time,
        "nystrom_sampled_error_reconstruction": sampled_froebenius_norm,
        "nystrom_sampled_error_reconstruction_uniform": sampled_froebenius_norm_uniform,
        "nystrom_svm_accuracy": accuracy_nystrom_svm,
        "nystrom_svm_time": delta_time_classification
    }
    resprinter.add(nystrom_results)
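
# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): make_nystrom_evaluation relies on the module-level `paraman`
# (parameter dictionary) and `resprinter` (result writer) being configured by
# the experiment runner. The KMeans centroids below are only one possible
# source of landmarks; the dataset, sizes, and parameter values are
# assumptions, and the helper name is hypothetical.
# ----------------------------------------------------------------------------
def _example_nystrom_evaluation_usage():
    from sklearn.cluster import KMeans
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split

    x, y = load_digits(return_X_y=True)
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
    centroids = KMeans(n_clusters=64, random_state=0).fit(x_train).cluster_centers_

    paraman["--nystrom"] = 500  # subsample size used for the reconstruction error
    make_nystrom_evaluation(x_train, y_train, x_test, y_test, centroids)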