Code Example #1
File: test_common.py  Project: Saikrishna41/polylearn
import numpy as np
from numpy.testing import assert_equal  # assumed source of assert_equal

from polylearn import FactorizationMachineClassifier


def test_augment():
    # The following linearly separable dataset cannot be modeled with just the
    # pairwise FM term: both points share the same interaction value x1 * x2.
    X_evil = np.array([[-1, -1], [1, 1]])
    y_evil = np.array([-1, 1])
    clf = FactorizationMachineClassifier(fit_linear=False, fit_lower=None,
                                         random_state=0)
    clf.fit(X_evil, y_evil)
    assert_equal(0.5, clf.score(X_evil, y_evil))

    # However, by adding a dummy feature (a column of all ones), the linear
    # effect can be captured.
    clf = FactorizationMachineClassifier(fit_linear=False, fit_lower='augment',
                                         random_state=0)
    clf.fit(X_evil, y_evil)
    assert_equal(1.0, clf.score(X_evil, y_evil))
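
The 'augment' trick can also be reproduced by hand: appending a constant all-ones column lets the pairwise term interact with that column and thereby express a linear effect. The snippet below is a minimal sketch of that idea on the same toy data; the X_aug variable and the manual augmentation are illustrative and not part of the original test.

# Sketch: manually append a dummy all-ones column instead of using
# fit_lower='augment'; the interaction with the constant column acts as a
# linear term, so the toy problem should become separable.
X_aug = np.hstack([X_evil, np.ones((2, 1))])
clf = FactorizationMachineClassifier(fit_linear=False, fit_lower=None,
                                     random_state=0)
clf.fit(X_aug, y_evil)
print(clf.score(X_aug, y_evil))  # expected to reach 1.0 on this toy dataset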
Code Example #2
def check_classification_losses(loss, degree):
    # X, P and lams are module-level fixtures defined in the test module;
    # _poly_predict evaluates the explicit polynomial model they define.
    y = np.sign(_poly_predict(X, P, lams, kernel="anova", degree=degree))
    clf = FactorizationMachineClassifier(degree=degree, loss=loss, beta=1e-3,
                                         fit_lower=None, fit_linear=False,
                                         tol=1e-3, random_state=0)
    clf.fit(X, y)
    assert_equal(1.0, clf.score(X, y))
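
This helper is meant to be driven over several loss/degree combinations. A minimal pytest-style driver is sketched below; the loss names ('squared_hinge', 'logistic') and the degrees are assumptions about what FactorizationMachineClassifier accepts, not values taken from this file.

def test_classification_losses():
    # Sketch of a driver loop; the loss names and degrees below are assumed.
    for loss in ('squared_hinge', 'logistic'):
        for degree in (2, 3):
            check_classification_losses(loss, degree)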