Example #1
0
def test_pytorch_gaussian_kernel():
    """Check that GaussianKernel produces the same kernel matrix for
    numpy-array input and torch.Tensor input."""
    n_feat = 10
    input_val = np.ones([2, n_feat])
    input_val[0, :] *= 1
    # bug fix: scale the SECOND row; the original scaled row 0 twice,
    # leaving both rows distinct only by accident of the intent
    input_val[1, :] *= 2
    # get exact gaussian kernel from the numpy input
    kernel = GaussianKernel(sigma=2.0)
    kernel_mat = kernel.get_kernel_matrix(input_val, input_val)
    # same computation with torch tensors must agree elementwise
    kernel_mat_torch = kernel.get_kernel_matrix(torch.Tensor(input_val),
                                                torch.Tensor(input_val))
    np.testing.assert_array_almost_equal(kernel_mat.cpu().numpy(),
                                         kernel_mat_torch.cpu().numpy())
    print("gaussian kernel pytorch version test passed!")
Example #2
0
def test_ensemble_nystrom_full_prec_three_learner():
    """Smoke-test a 3-learner EnsembleNystrom at full precision: the ensemble
    feature map must keep the expected (n_sample, n_feat) shape."""
    # test if keeping all the dimensions makes the nystrom kernel matrix equal
    # to the exact kernel
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2  = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat] ) ).double()
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2)

    # ensembled nystrom method with three learners
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=3, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    assert feat_ensemble.size(0) == n_sample
    assert feat_ensemble.size(1) == n_feat
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1, input_val2)
    # bug fix: the message claimed "single learner" although n_learner=3
    print("three learner ensembled nystrom test passed!")
Example #3
0
def test_ensemble_nystrom_full_prec_one_learner():
    """A 1-learner EnsembleNystrom should match plain Nystrom: compare the
    squared norms of the feature maps and of the approximate kernel matrices."""
    num_samples = 150
    num_features = num_samples
    x1 = torch.DoubleTensor(
        np.random.normal(size=[num_samples, num_features])).double()
    x2 = x1
    # x2  = torch.DoubleTensor(np.random.normal(size=[num_samples - 1, num_features] ) ).double()
    # exact gaussian kernel used as the common reference
    gauss = GaussianKernel(sigma=10.0)
    exact_mat = gauss.get_kernel_matrix(x1, x2)

    # plain nystrom approximation
    nystrom = Nystrom(num_features, kernel=gauss)
    nystrom.setup(x1)
    nystrom_feat = nystrom.get_feat(x1)
    nystrom_mat = nystrom.get_kernel_matrix(x1, x2)

    # ensembled nystrom restricted to a single learner
    ensemble = EnsembleNystrom(num_features, n_learner=1, kernel=gauss)
    ensemble.setup(x1)
    ensemble_feat = ensemble.get_feat(x1)
    ensemble_mat = ensemble.get_kernel_matrix(x1, x2)

    # with one learner the ensemble should reproduce plain nystrom up to
    # rotation, so the squared norms must agree
    np.testing.assert_array_almost_equal(
        np.sum(nystrom_feat.cpu().numpy()**2),
        np.sum(ensemble_feat.cpu().numpy()**2))
    np.testing.assert_array_almost_equal(
        np.sum(nystrom_mat.cpu().numpy()**2),
        np.sum(ensemble_mat.cpu().numpy()**2))
    print("single learner ensembled nystrom test passed!")
Example #4
0
def test_kernel_ridge_regression2():
    '''
    Sanity-check KernelRidgeRegression with an RFF-approximated gaussian
    kernel: the feature weights computed inside the regressor (via alpha)
    must equal the weights from the closed-form primal solution.
    '''
    n_feat = 10
    n_rff_feat = 1000
    X_train = np.ones([2, n_feat])
    X_train[0, :] *= 1
    # bug fix: scale the SECOND row; the original scaled row 0 twice,
    # which left the two training rows identical
    X_train[1, :] *= 2
    Y_train = np.ones([2, 1])
    kernel = GaussianKernel(sigma=2.0)
    kernel = RFF(n_rff_feat, n_feat, kernel)
    use_cuda = torch.cuda.is_available()
    kernel.torch(cuda=use_cuda)
    reg_lambda = 1.0
    regressor = KernelRidgeRegression(kernel, reg_lambda=reg_lambda)
    if use_cuda:
        regressor.fit(torch.cuda.DoubleTensor(X_train),
                      torch.cuda.DoubleTensor(Y_train))
    else:
        regressor.fit(torch.DoubleTensor(X_train), torch.DoubleTensor(Y_train))
    # compare the two ways of calculating feature weights as sanity check;
    # calling get_kernel_matrix populates kernel.rff_x1 / kernel.rff_x2
    if use_cuda:
        kernel.get_kernel_matrix(torch.cuda.DoubleTensor(X_train),
                                 torch.cuda.DoubleTensor(X_train))
    else:
        kernel.get_kernel_matrix(torch.DoubleTensor(X_train),
                                 torch.DoubleTensor(X_train))
    # feature weight using the approach inside KernelRidgeRegression
    w1 = torch.mm(torch.transpose(kernel.rff_x2, 0, 1), regressor.alpha)
    # feature weight using the explicit ridge-regression normal equations:
    # w2 = (lambda I + Phi^T Phi)^-1 Phi^T Y
    if use_cuda:
        val = torch.inverse( (regressor.reg_lambda * torch.eye(n_rff_feat).double().cuda() \
          + torch.mm(torch.transpose(kernel.rff_x1, 0, 1), kernel.rff_x1) ) )
    else:
        val = torch.inverse( (regressor.reg_lambda * torch.eye(n_rff_feat).double() \
          + torch.mm(torch.transpose(kernel.rff_x1, 0, 1), kernel.rff_x1) ) )
    val = torch.mm(val, torch.transpose(kernel.rff_x2, 0, 1))
    if use_cuda:
        w2 = torch.mm(val, torch.cuda.DoubleTensor(Y_train))
    else:
        w2 = torch.mm(val, torch.DoubleTensor(Y_train))
    np.testing.assert_array_almost_equal(w1.cpu().numpy(), w2.cpu().numpy())
    print("kernel ridge regression test2 passed!")
Example #5
0
def test_rff_generation():
    """Check that a large random-Fourier-feature expansion approximates the
    exact gaussian kernel matrix to ~3 decimals."""
    n_feat = 10
    n_rff_feat = 1000000  # many features so the Monte-Carlo approximation is tight
    input_val = np.ones([2, n_feat])
    input_val[0, :] *= 1
    # bug fix: scale the SECOND row; the original scaled row 0 twice,
    # leaving both input rows identical
    input_val[1, :] *= 2
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=2.0)
    kernel_mat = kernel.get_kernel_matrix(input_val, input_val)
    # get RFF approximate kernel matrix
    rff = RFF(n_rff_feat, n_feat, kernel=kernel)
    rff.get_gaussian_wb()
    approx_kernel_mat = rff.get_kernel_matrix(input_val, input_val)
    np.testing.assert_array_almost_equal(approx_kernel_mat.cpu().numpy(),
                                         kernel_mat.cpu().numpy(),
                                         decimal=3)
    print("rff generation test passed!")
Example #6
0
def test_ensemble_nystrom_low_prec():
    """Quantized EnsembleNystrom test: with consistent_quant_seed=True, two
    evaluations of the kernel matrix must yield identical results."""
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2  = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat] ) ).double()
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # setup quantizer (4-bit, range taken from the data)
    quantizer = Quantizer(4,
                          torch.min(input_val1),
                          torch.max(input_val1),
                          rand_seed=2,
                          use_cuda=False)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2,
                                                 quantizer, quantizer)

    # ensembled nystrom method
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=1, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    # bug fix: the second call originally just overwrote the first result and
    # nothing was compared; with consistent_quant_seed=True the two runs must
    # produce the same quantized kernel matrix, so assert that explicitly
    approx_kernel_mat_ensemble1 = approx_ensemble.get_kernel_matrix(
        input_val1,
        input_val2,
        quantizer,
        quantizer,
        consistent_quant_seed=True)
    approx_kernel_mat_ensemble2 = approx_ensemble.get_kernel_matrix(
        input_val1,
        input_val2,
        quantizer,
        quantizer,
        consistent_quant_seed=True)
    np.testing.assert_array_almost_equal(
        approx_kernel_mat_ensemble1.cpu().numpy(),
        approx_kernel_mat_ensemble2.cpu().numpy())

    # bug fix: typo "quantizerd" in the pass message
    print("single learner ensembled nystrom quantized version test passed!")
Example #7
0
def test_nystrom_full():
    """With as many landmarks as samples, Nystrom must reproduce the exact
    gaussian kernel matrix."""
    num_samples = 15
    num_features = num_samples
    x1 = torch.Tensor(
        np.random.normal(size=[num_samples, num_features])).double()
    x2 = torch.Tensor(
        np.random.normal(size=[num_samples - 1, num_features])).double()
    # reference: the exact gaussian kernel matrix with a random bandwidth
    gauss = GaussianKernel(sigma=np.random.normal())
    exact_mat = gauss.get_kernel_matrix(x1, x2)

    nystrom = Nystrom(num_features, kernel=gauss)
    nystrom.setup(x1)
    approx_mat = nystrom.get_kernel_matrix(x1, x2)

    # full-rank nystrom should be exact up to numerical precision
    np.testing.assert_array_almost_equal(exact_mat.cpu().numpy(),
                                         approx_mat.cpu().numpy())
    print("nystrom full dimension test passed!")