Example #1
def test_sgd_regressor_rk_as(loss):
    rng = np.random.RandomState(0)
    transform = RandomKernel(n_components=100,
                             random_state=0,
                             kernel='all_subsets')
    X_trans = transform.fit_transform(X)
    y, coef = generate_target(X_trans, rng, -0.1, 0.1)
    y_train = y[:n_train]
    y_test = y[n_train:]
    _test_regressor(transform, y_train, y_test, X_trans, loss=loss)
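These snippets are excerpts from a single test module, so names such as X, Y, X_sp, X_train, X_test, n_train, generate_target, and the _test_regressor/_test_classifier helpers are defined once at module level, and arguments such as loss, degree, normalize, and dist are injected via @pytest.mark.parametrize decorators like the ones shown at the end of this page. A minimal sketch of the assumed shared setup (the shapes, value ranges, and the generate_target body are illustrative assumptions, not taken from the source):

import numpy as np
from scipy.sparse import csr_matrix

n_train, n_test = 60, 40
rng_data = np.random.RandomState(42)
X = rng_data.uniform(-1, 1, size=(n_train + n_test, 10))
Y = rng_data.uniform(-1, 1, size=(20, 10))
X_sp = csr_matrix(X)          # sparse copy used by the sparse-input checks
X_train, X_test = X[:n_train], X[n_train:]


def generate_target(X_trans, rng, low, high):
    # draw a random linear model on the transformed features and return
    # the induced target together with its coefficients
    coef = rng.uniform(low, high, size=X_trans.shape[1])
    return np.dot(X_trans, coef), coef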
Example #2
def test_anova_kernel(dist, degree, kernel):
    # compute exact kernel
    gram = anova(X, Y, degree)
    # approximate kernel mapping
    rk_transform = RandomKernel(n_components=1000, random_state=0,
                                kernel=kernel, degree=degree,
                                distribution=dist, p_sparse=0.5)

    X_trans = rk_transform.fit_transform(X)
    Y_trans = rk_transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)
    error = gram - kernel_approx
    assert np.abs(np.mean(error)) < 0.0001
    assert np.max(error) < 0.001  # nothing too far off
    assert np.mean(error) < 0.0005  # mean is fairly close

    # sparse input
    X_trans_sp = rk_transform.transform(X_sp)
    assert_allclose_dense_sparse(X_trans, X_trans_sp)

    # sparse output
    if dist == "sparse_rademacher":
        rk_transform.dense_output = False
        X_trans_sp = rk_transform.transform(X_sp)
        assert issparse(X_trans_sp)
        assert_allclose_dense_sparse(X_trans, X_trans_sp.toarray())
    else:
        rk_transform.dense_output = False
        X_trans_sp = rk_transform.transform(X_sp)
        assert not issparse(X_trans_sp)
        assert_allclose_dense_sparse(X_trans, X_trans_sp)
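For reference, the exact ANOVA kernel computed by anova(X, Y, degree) is k_m(x, y) = sum over index tuples j_1 < ... < j_m of prod_i x_{j_i} y_{j_i}. A naive reference implementation (an assumption; the test imports its own, presumably vectorized, version) can use the standard dynamic-programming recursion over features:

import numpy as np

def anova(X, Y, degree):
    # Naive reference for the degree-m ANOVA kernel.
    n_x, n_y = X.shape[0], Y.shape[0]
    gram = np.zeros((n_x, n_y))
    for a in range(n_x):
        for b in range(n_y):
            z = X[a] * Y[b]
            # dp[t] holds the degree-t ANOVA kernel over the features seen so far
            dp = np.zeros(degree + 1)
            dp[0] = 1.0
            for zj in z:
                # update high degrees first so each feature is used at most once
                for t in range(degree, 0, -1):
                    dp[t] += zj * dp[t - 1]
            gram[a, b] = dp[degree]
    return gram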
Example #3
def test_regressor_rk_as(normalize, loss):
    rng = np.random.RandomState(0)
    # approximate kernel mapping
    transformer = RandomKernel(n_components=100, random_state=0,
                               kernel='all_subsets')
    X_trans = transformer.fit_transform(X)
    y, coef = generate_target(X_trans, rng, -0.1, 0.1)
    y_train = y[:n_train]
    y_test = y[n_train:]
    _test_regressor(transformer, X_train, y_train, X_test, y_test, X_trans,
                    normalize=normalize, loss=loss)
Example #4
def test_sgd_regressor_rk(loss):
    rng = np.random.RandomState(0)
    for degree in range(2, 5):
        transform = RandomKernel(n_components=100,
                                 random_state=0,
                                 degree=degree)
        X_trans = transform.fit_transform(X)
        y, coef = generate_target(X_trans, rng, -0.1, 0.1)
        y_train = y[:n_train]
        y_test = y[n_train:]
        _test_regressor(transform, y_train, y_test, X_trans, loss=loss)
Example #5
def test_sparse_rademacher(p_sparse):
    # approximate kernel mapping
    rk_transform = RandomKernel(n_components=1000, random_state=0,
                                kernel='anova',
                                distribution='sparse_rademacher',
                                p_sparse=p_sparse)
    X_trans = rk_transform.fit_transform(X)
    # the fitted weight matrix should be sparse at roughly the requested rate
    nnz_actual = rk_transform.random_weights_.nnz
    nnz_expected = X.shape[1] * rk_transform.n_components * (1 - p_sparse)
    assert np.abs(1 - nnz_actual / nnz_expected) < 0.1
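A sparse Rademacher weight is zero with probability p_sparse, which is exactly the nonzero rate this test checks (nnz is roughly d * n_components * (1 - p_sparse)). A minimal sampling sketch; the +/- 1/sqrt(1 - p_sparse) unit-variance scaling of the surviving entries is an assumption here, following the usual sparse random projection construction:

import numpy as np
from scipy.sparse import csr_matrix

def sample_sparse_rademacher(rng, shape, p_sparse):
    # Each entry is 0 with probability p_sparse; surviving entries are
    # +/- 1/sqrt(1 - p_sparse) with equal probability, so E[w] = 0 and
    # E[w**2] = 1 (the unit-variance scaling is an assumption).
    mask = rng.uniform(size=shape) >= p_sparse
    signs = rng.choice([-1.0, 1.0], size=shape)
    dense = mask * signs / np.sqrt(1.0 - p_sparse)
    return csr_matrix(dense)

W = sample_sparse_rademacher(np.random.RandomState(0), (10, 1000), 0.8)
print(W.nnz / np.prod(W.shape))   # roughly 1 - p_sparse = 0.2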
Example #6
def test_classifier_rk(normalize, loss, degree):
    rng = np.random.RandomState(0)
    # approximate kernel mapping
    transformer = RandomKernel(n_components=100, random_state=0,
                               degree=degree)
    X_trans = transformer.fit_transform(X)
    y, coef = generate_target(X_trans, rng, -0.1, 0.1)
    y_train = y[:n_train]
    y_test = y[n_train:]
    _test_classifier(transformer, X_train, np.sign(y_train), X_test,
                     np.sign(y_test), X_trans, normalize=normalize,
                     loss=loss)
Example #7
def test_compact_random_feature_random_kernel_all_subsets(down):
    # approximate kernel mapping
    transform_up = RandomKernel(kernel='all_subsets',
                                random_state=0,
                                n_components=100)
    transform_down = down(n_components=50, random_state=0)
    X_trans_naive = transform_down.fit_transform(transform_up.fit_transform(X))

    transform_up = RandomKernel(kernel='all_subsets',
                                random_state=0,
                                n_components=100)
    transform_down = down(n_components=50, random_state=0)
    transformer = CompactRandomFeature(transformer_up=transform_up,
                                       transformer_down=transform_down)
    X_trans = transformer.fit_transform(X)
    assert_allclose(X_trans_naive, X_trans)
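Since both stages are constructed with random_state=0, refitting them inside CompactRandomFeature draws exactly the same weights as the naive two-step run, which is what the assertion verifies. The naive chain can equivalently be written as a scikit-learn pipeline (a sketch; it assumes the down fixture supplies a standard transformer class):

from sklearn.pipeline import make_pipeline

naive = make_pipeline(
    RandomKernel(kernel='all_subsets', random_state=0, n_components=100),
    down(n_components=50, random_state=0),
)
assert_allclose(naive.fit_transform(X), X_trans)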
Example #8
def test_sgd_classifier_rk(loss):
    rng = np.random.RandomState(0)
    for degree in range(2, 5):
        transform = RandomKernel(n_components=100,
                                 random_state=0,
                                 degree=degree)
        X_trans = transform.fit_transform(X)
        y, coef = generate_target(X_trans, rng, -0.1, 0.1)
        y_train = y[:n_train]
        y_test = y[n_train:]
        _test_classifier(transform,
                         np.sign(y_train),
                         np.sign(y_test),
                         X_trans,
                         max_iter=100,
                         eta0=.1,
                         loss=loss)
Example #9
def test_all_subsets_kernel(dist):
    # compute exact kernel
    p_sparse = 0.5
    kernel = all_subsets(X, Y)
    # approximate kernel mapping
    rk_transform = RandomKernel(n_components=5000, random_state=0,
                                kernel='all_subsets',
                                distribution=dist, p_sparse=p_sparse)
    X_trans = rk_transform.fit_transform(X)
    Y_trans = rk_transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)

    error = kernel - kernel_approx
    if dist == 'sparse_rademacher':
        nnz = rk_transform.random_weights_.nnz
        # p_sparse is the zero probability, so the expected nonzero count
        # uses 1 - p_sparse (binomial mean and standard deviation)
        nnz_expect = np.prod(rk_transform.random_weights_.shape)*(1-p_sparse)
        nnz_var = np.sqrt(nnz_expect * p_sparse)
        assert np.abs(nnz-nnz_expect) < 3*nnz_var
    assert np.abs(np.mean(error)) < 0.01
    assert np.max(error) < 0.1  # nothing too far off
    assert np.mean(error) < 0.05  # mean is fairly close

    X_trans_sp = rk_transform.transform(X_sp)
    assert_allclose_dense_sparse(X_trans, X_trans_sp)
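The exact all-subsets kernel used above factorizes as k(x, y) = prod_j (1 + x_j * y_j): expanding the product yields one term prod_{j in S} x_j * y_j for every feature subset S. A naive all_subsets(X, Y) reference built on that identity (an assumption; the test imports its own implementation):

import numpy as np

def all_subsets(X, Y):
    # k(x, y) = prod_j (1 + x_j * y_j), accumulated one feature at a time
    n_x, n_y = X.shape[0], Y.shape[0]
    gram = np.ones((n_x, n_y))
    for j in range(X.shape[1]):
        gram *= 1.0 + np.outer(X[:, j], Y[:, j])
    return gram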
Example #10
    if lkrf.remove_bases():
        X_trans_removed = lkrf.transform(X)
        assert X_trans_removed.shape[1] == n_nz
        indices = np.nonzero(lkrf.importance_weights_)[0]
        assert_almost_equal(X_trans_removed, X_trans[:, indices])
 

params = [
    RBFSampler(n_components=128, random_state=0),
    RandomFourier(n_components=128, random_state=0),
    RandomFourier(n_components=128, random_state=0, use_offset=True),
    OrthogonalRandomFeature(n_components=128, random_state=0),
    OrthogonalRandomFeature(n_components=128, random_state=0,
                            use_offset=True),
    RandomMaclaurin(random_state=0),
    RandomKernel(random_state=0)
]


@pytest.mark.parametrize("transformer", params)
def test_lkrf_chi2(transformer, rho=1):
    _test_learning_kernel_with_random_feature('chi2', transformer, rho)


def test_lkrf_chi2_origin():
    _test_learning_kernel_with_random_feature('chi2_origin')


@pytest.mark.parametrize("transformer", params)
def test_lkrf_kl(transformer):
    _test_learning_kernel_with_random_feature('kl', transformer, rho=0.01)