def test_ensemble_nystrom_full_prec_one_learner():
    # test that, with all dimensions kept (n_feat == n_sample), a single-learner
    # ensembled Nystrom approximation matches the plain Nystrom approximation
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2 = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat])).double()

    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2)

    # ensembled nystrom method with a single learner
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=1, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1, input_val2)

    np.testing.assert_array_almost_equal(
        np.sum(feat.cpu().numpy()**2),
        np.sum(feat_ensemble.cpu().numpy()**2))
    np.testing.assert_array_almost_equal(
        np.sum(approx_kernel_mat.cpu().numpy()**2),
        np.sum(approx_kernel_mat_ensemble.cpu().numpy()**2))
    print("single learner ensembled nystrom test passed!")
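# Illustrative helper, not part of the original test file: relative Frobenius-norm
# error between the exact kernel matrix and an approximation. The test above only
# compares the two approximations against each other; a helper like this (the name
# is an assumption) is one way to also check closeness to the exact Gaussian kernel
# when all Nystrom dimensions are kept.
def relative_frobenius_error(exact_mat, approx_mat):
    exact = np.asarray(exact_mat)
    approx = np.asarray(approx_mat)
    # Frobenius norm of the difference, normalized by the norm of the exact kernel
    return np.linalg.norm(exact - approx, ord="fro") / np.linalg.norm(exact, ord="fro")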
def test_ensemble_nystrom_full_prec_three_learner():
    # test the ensembled Nystrom approximation with three learners:
    # the feature map should have the expected shape
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2 = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat])).double()

    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2)

    # ensembled nystrom method with three learners
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=3, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    assert feat_ensemble.size(0) == n_sample
    assert feat_ensemble.size(1) == n_feat
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1, input_val2)
    print("three learner ensembled nystrom test passed!")
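# Conceptual sketch (an assumption, not the repository's EnsembleNystrom
# implementation): with uniform weights, an ensembled Nystrom approximation
# averages the kernel matrices produced by its individual learners. The function
# below only illustrates that averaging step with hypothetical per-learner matrices.
def average_per_learner_kernels(per_learner_kernel_mats):
    # stack the per-learner kernel matrices and average over the learner axis
    return np.mean(np.stack(per_learner_kernel_mats, axis=0), axis=0)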
def test_ensemble_nystrom_low_prec():
    # test the ensembled Nystrom approximation with low precision (quantized)
    # features, using a consistent quantization seed
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2 = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat])).double()

    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # setup quantizer
    quantizer = Quantizer(4, torch.min(input_val1), torch.max(input_val1),
                          rand_seed=2, use_cuda=False)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2,
                                                 quantizer, quantizer)

    # ensembled nystrom method with a single learner
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=1, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1, input_val2, quantizer, quantizer,
        consistent_quant_seed=True)
    # repeated call with the same consistent quantization seed
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1, input_val2, quantizer, quantizer,
        consistent_quant_seed=True)
    print("single learner ensembled nystrom quantized version test passed!")
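# Illustrative sketch of n-bit uniform quantization over a fixed range, in the
# spirit of the Quantizer used above (4 bits over [min, max] of the input). This
# is an assumption made for exposition: the real Quantizer takes a rand_seed,
# which suggests stochastic rounding, while this sketch rounds deterministically.
def uniform_quantize_sketch(x, nbit, min_val, max_val):
    n_levels = 2 ** nbit - 1
    scale = (max_val - min_val) / n_levels
    # clip to the representable range, then snap to the nearest quantization level
    x_clipped = np.clip(np.asarray(x), min_val, max_val)
    return np.round((x_clipped - min_val) / scale) * scale + min_val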
                          shuffle=False)

# setup gaussian kernel
n_input_feat = X_train.shape[1]
kernel = GaussianKernel(sigma=args.kernel_sigma)
if args.approx_type == "exact":
    print("exact kernel mode")
    # raise Exception("SGD based exact kernel is not implemented yet!")
    kernel_approx = kernel
    quantizer = None
elif args.approx_type == "nystrom":
    print("fp nystrom mode")
    kernel_approx = Nystrom(args.n_feat, kernel=kernel,
                            rand_seed=args.random_seed)
    kernel_approx.setup(X_train)
    quantizer = None
elif args.approx_type == "ensemble_nystrom":
    print("ensembled nystrom mode with", args.n_ensemble_nystrom, "learners")
    kernel_approx = EnsembleNystrom(args.n_feat,
                                    n_learner=args.n_ensemble_nystrom,
                                    kernel=kernel,
                                    rand_seed=args.random_seed)
    kernel_approx.setup(X_train)
    if args.do_fp_feat:
        quantizer = None
    else:
        # decide on the range of representation from training-sample-based features
        train_feat = kernel_approx.get_feat(X_train)
        min_val = torch.min(train_feat)
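        # The excerpt ends here. A plausible continuation (an assumption, not the
        # original script): mirror the tests above and build the quantizer from the
        # training-feature range; args.n_bit_feat is a hypothetical command-line flag.
        # max_val = torch.max(train_feat)
        # quantizer = Quantizer(args.n_bit_feat, min_val, max_val,
        #                       rand_seed=args.random_seed, use_cuda=False)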