Example #1
def fit(self, X, y):
        """Fit factorization machine to training data.

        Parameters
        ----------
        X : array-like or sparse, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : Estimator
            Returns self.
        """
        if self.degree > 3:
            raise ValueError("FMs with degree >3 not yet supported.")

        X, y = self._check_X_y(X, y)
        X = self._augment(X)
        n_features = X.shape[1]  # augmented
        X_col_norms = row_norms(X.T, squared=True)
        dataset = get_dataset(X, order="fortran")
        rng = check_random_state(self.random_state)
        loss_obj = self._get_loss(self.loss)

        if not (self.warm_start and hasattr(self, 'w_')):
            self.w_ = np.zeros(n_features, dtype=np.double)

        if self.fit_lower == 'explicit':
            n_orders = self.degree - 1
        else:
            n_orders = 1

        if not (self.warm_start and hasattr(self, 'P_')):
            self.P_ = 0.01 * rng.randn(n_orders, self.n_components, n_features)

        if not (self.warm_start and hasattr(self, 'lams_')):
            if self.init_lambdas == 'ones':
                self.lams_ = np.ones(self.n_components)
            elif self.init_lambdas == 'random_signs':
                self.lams_ = np.sign(rng.randn(self.n_components))
            else:
                raise ValueError("Lambdas must be initialized as ones "
                                 "(init_lambdas='ones') or as random "
                                 "+/- 1 (init_lambdas='random_signs').")

        y_pred = self._get_output(X)

        converged, self.n_iter_ = _cd_direct_ho(
            self.P_, self.w_, dataset, X_col_norms, y, y_pred, self.lams_,
            self.degree, self.alpha, self.beta, self.fit_linear,
            self.fit_lower == 'explicit', loss_obj, self.max_iter, self.tol,
            self.verbose)
        if not converged:
            warnings.warn("Objective did not converge. Increase max_iter.")

        return self
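For context, a minimal usage sketch of the estimator that exposes this fit. The estimator name and setup below are assumptions based on polylearn's public API, not part of the example above:

# Hedged usage sketch; FactorizationMachineRegressor is assumed from
# polylearn's public API, and the data is purely illustrative.
import numpy as np
from polylearn import FactorizationMachineRegressor

rng = np.random.RandomState(0)
X = rng.randn(100, 10)
y = X[:, 0] * X[:, 1] + 0.01 * rng.randn(100)  # pairwise-interaction target

fm = FactorizationMachineRegressor(degree=2, n_components=3, tol=1e-3,
                                   random_state=0)
fm.fit(X, y)            # returns self; warns if max_iter is exhausted
y_pred = fm.predict(X)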
Example #2
    def fit(self, X, y):
        self._set_label_transformers(y)
        y = np.asfortranarray(self.label_binarizer_.transform(y),
                              dtype=np.float64)

        if self.eta is None or self.eta == 'auto':
            eta = get_auto_step_size(get_dataset(X, order="c"), self.alpha,
                                     self.loss, self.is_saga)
        else:
            eta = self.eta

        if self.alpha * eta == 1:
            # to match the behaviour of SAGA,
            # which slightly decreases eta in this case
            eta *= 0.9

        loss = self._get_loss(self.loss)
        self.penalty = self._get_penalty(self.penalty)

        if not self.is_saga and self.penalty is not None:
            raise ValueError("PySAGClassifier only accepts l2 penalty. Please "
                             "use `saga=True` or PySAGAClassifier.")

        if self.is_saga:
            self.coef_ = _fit_saga(X, y, eta, self.alpha, loss, self.penalty,
                                   self.max_iter, self.rng)
        else:
            self.coef_ = _fit_sag(X, y, eta, self.alpha, loss, self.max_iter,
                                  self.rng)

        return self
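PySAGClassifier / PySAGAClassifier above appear to be test-side reference implementations. A sketch of the equivalent public lightning estimator; the class, its 'auto' step size, and the 'log' loss are assumptions about lightning's API:

# Hedged sketch of the public estimator (lightning.classification.SAGAClassifier
# is assumed; parameters mirror the eta/alpha handling shown above).
from sklearn.datasets import make_classification
from lightning.classification import SAGAClassifier

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
clf = SAGAClassifier(eta='auto', alpha=1e-4, loss='log', max_iter=20,
                     random_state=0)
clf.fit(X, y)
print(clf.score(X, y))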
Example #3
    def fit(self, X, y):
        """Fit factorization machine to training data.

        Parameters
        ----------
        X : array-like or sparse, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : Estimator
            Returns self.
        """
        if self.degree > 3:
            raise ValueError("FMs with degree >3 not yet supported.")

        X, y = self._check_X_y(X, y)
        X = self._augment(X)
        n_features = X.shape[1]  # augmented
        X_col_norms = row_norms(X.T, squared=True)
        dataset = get_dataset(X, order="fortran")
        rng = check_random_state(self.random_state)
        loss_obj = self._get_loss(self.loss)

        if not (self.warm_start and hasattr(self, 'w_')):
            self.w_ = np.zeros(n_features, dtype=np.double)

        if self.fit_lower == 'explicit':
            n_orders = self.degree - 1
        else:
            n_orders = 1

        if not (self.warm_start and hasattr(self, 'P_')):
            self.P_ = 0.01 * rng.randn(n_orders, self.n_components, n_features)

        if not (self.warm_start and hasattr(self, 'lams_')):
            if self.init_lambdas == 'ones':
                self.lams_ = np.ones(self.n_components)
            elif self.init_lambdas == 'random_signs':
                self.lams_ = np.sign(rng.randn(self.n_components))
            else:
                raise ValueError("Lambdas must be initialized as ones "
                                 "(init_lambdas='ones') or as random "
                                 "+/- 1 (init_lambdas='random_signs').")

        y_pred = self._get_output(X)

        converged = _cd_direct_ho(self.P_, self.w_, dataset, X_col_norms, y,
                                  y_pred, self.lams_, self.degree, self.alpha,
                                  self.beta, self.fit_linear,
                                  self.fit_lower == 'explicit', loss_obj,
                                  self.max_iter, self.tol, self.verbose)
        if not converged:
            warnings.warn("Objective did not converge. Increase max_iter.")

        return self
Example #5
def test_sag_dataset(SAG_, bin_train_data):
    # make sure SAG/SAGA accept a Dataset object as argument
    X_bin, y_bin = bin_train_data
    clf1 = SAG_(eta=1e-3, max_iter=20, verbose=0, random_state=0)
    clf2 = SAG_(eta=1e-3, max_iter=20, verbose=0, random_state=0)
    clf1.fit(get_dataset(X_bin, order='C'), y_bin)
    clf2.fit(X_bin, y_bin)
    np.testing.assert_almost_equal(clf1.coef_, clf2.coef_)
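The fixtures this test relies on are not shown on this page. A hypothetical pair matching the signatures used above (fixture bodies and the parametrization are assumptions):

# Hypothetical fixtures; names match the test signature, bodies are guesses.
import pytest
from sklearn.datasets import make_classification
from lightning.classification import SAGClassifier, SAGAClassifier

@pytest.fixture(params=[SAGClassifier, SAGAClassifier])
def SAG_(request):
    return request.param

@pytest.fixture
def bin_train_data():
    X_bin, y_bin = make_classification(n_samples=20, n_features=10,
                                       random_state=0)
    return X_bin, y_bin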
Example #6
def test_sag_dataset():
    # make sure SAG/SAGA accept a Dataset object as argument
    for SAG_ in (SAGAClassifier, SAGClassifier, SAGRegressor, SAGARegressor):
        clf1 = SAG_(eta=1e-3, max_iter=20, verbose=0, random_state=0)
        clf2 = SAG_(eta=1e-3, max_iter=20, verbose=0, random_state=0)
        clf1.fit(get_dataset(X_bin, order='C'), y_bin)
        clf2.fit(X_bin, y_bin)
        assert_almost_equal(clf1.coef_, clf2.coef_)
Example #7
def test_sag_dataset():
    # make sure SAG/SAGA accept a Dataset object as argument
    for SAG_ in (SAGAClassifier, SAGClassifier, SAGRegressor, SAGARegressor):
        clf1 = SAG_(eta=1e-3, max_iter=20, verbose=0, random_state=0)
        clf2 = SAG_(eta=1e-3, max_iter=20, verbose=0, random_state=0)
        clf1.fit(get_dataset(X_bin, order='C'), y_bin)
        clf2.fit(X_bin, y_bin)
        np.testing.assert_almost_equal(clf1.coef_, clf2.coef_)
Example #8
    def _predict(self, X):
        if not hasattr(self, "U_"):
            raise NotFittedError("Estimator not fitted.")

        X = check_array(X, accept_sparse='csc', dtype=np.double)
        X = self._augment(X)
        X = get_dataset(X, order='fortran')
        return _lifted_predict(self.U_, X)
Example #9
def test_sparse_dot():
    for data in (bin_dense, bin_csr):
        K = linear_kernel(data)
        K2 = np.zeros_like(K)
        ds = get_dataset(data)

        for i in range(data.shape[0]):
            for j in range(i, data.shape[0]):
                K2[i, j] = sparse_dot(ds, i, j)
                K2[j, i] = K[i, j]

        assert_array_almost_equal(K, K2)
Example #10
def test_sparse_dot(data, request):
    X, _ = request.getfixturevalue(data)
    K = linear_kernel(X)
    K2 = np.zeros_like(K)
    ds = get_dataset(X)

    for i in range(X.shape[0]):
        for j in range(i, X.shape[0]):
            K2[i, j] = sparse_dot(ds, i, j)
            K2[j, i] = K[i, j]

    np.testing.assert_array_almost_equal(K, K2)
Example #12
def _fit_linear(X, y, alpha, n_iter, loss, callback=None):
    n_samples, n_features = X.shape
    X_col_norm_sq = (X ** 2).sum(axis=0)
    X_ds = get_dataset(X, order='fortran')
    w_init = np.zeros(n_features)
    y_pred = np.zeros(n_samples)

    for _ in range(n_iter):
        viol = _cd_linear_epoch(w_init, X_ds, y, y_pred, X_col_norm_sq,
                                alpha, loss)
        if callback is not None:
            callback(w_init, viol)
    return w_init
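Under the module-level setup shown in Example #17 below (X, y, and Squared from polylearn.loss_fast), a quick sanity check of this helper might look like the following; the alpha value and tolerance are our guesses:

# Hedged sanity check: with a tiny ridge penalty, coordinate descent
# should land very close to the least-squares solution.
import numpy as np
from polylearn.loss_fast import Squared

w = _fit_linear(X, y, alpha=1e-5, n_iter=100, loss=Squared())
w_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
np.testing.assert_array_almost_equal(w, w_lstsq, decimal=3)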
Example #13
def test_epoch():
    U = rng.randn(*true_U.shape)
    U2 = U.copy()

    viol, lv = _bilinear_cd(U, true_V, X_left, X_right, y, 1.0)

    dataset = get_dataset(X_left, 'fortran')

    # precomputing for cython
    y_pred = _bilinear_forward(U2, true_V, X_left, X_right)
    XrV = safe_sparse_dot(X_right, true_V)
    VtGsq = safe_sparse_dot(XrV.T ** 2, X_left ** 2)
    v2 = _cd_bilinear_epoch(U2, dataset, XrV, y, y_pred, VtGsq, 1.0)

    assert_almost_equal(viol, v2)
    assert_array_almost_equal(U, U2)
Example #14
    def fit(self, X, y):
        """Fit polynomial network to training data.

        Parameters
        ----------
        X : array-like or sparse, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : Estimator
            Returns self.
        """
        if self.fit_lower == 'explicit':
            raise NotImplementedError('Explicit fitting of lower orders '
                                      'not yet implemented for polynomial '
                                      'network models.')

        X, y = self._check_X_y(X, y)
        X = self._augment(X)
        n_features = X.shape[1]  # augmented
        dataset = get_dataset(X, order="fortran")
        rng = check_random_state(self.random_state)
        loss_obj = self._get_loss(self.loss)

        if not (self.warm_start and hasattr(self, 'U_')):
            self.U_ = 0.01 * rng.randn(self.degree, self.n_components,
                                       n_features)

        y_pred = _lifted_predict(self.U_, dataset)

        converged, self.n_iter_ = _cd_lifted(self.U_, dataset, y, y_pred,
                                             self.beta, loss_obj,
                                             self.max_iter, self.tol,
                                             self.verbose)

        if not converged:
            warnings.warn("Objective did not converge. Increase max_iter.")

        return self
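As with the factorization machine above, a minimal usage sketch. The estimator name is assumed from polylearn's public API and the data is illustrative:

# Hedged usage sketch; PolynomialNetworkRegressor is assumed to be the
# public estimator exposing the fit() above.
import numpy as np
from polylearn import PolynomialNetworkRegressor

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = X[:, 0] * X[:, 1] * X[:, 2] + 0.01 * rng.randn(100)  # degree-3 target

pn = PolynomialNetworkRegressor(degree=3, n_components=2, tol=1e-3,
                                random_state=0)
pn.fit(X, y)            # returns self; warns if max_iter is exhausted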
Example #16
def test_lifted_predict():
    y_ref = _lifted_predict(U, X)
    ds = get_dataset(X, order='fortran')
    y = _ds_lifted_predict(U, ds)
    assert_array_almost_equal(y_ref, y)
Example #17
from nose.tools import assert_less_equal, assert_greater_equal
from numpy.testing import assert_array_almost_equal

import numpy as np
from sklearn.utils.validation import assert_all_finite
from polylearn.cd_linear_fast import _cd_linear_epoch
from polylearn.loss_fast import Squared, SquaredHinge, Logistic
from lightning.impl.dataset_fast import get_dataset

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
w_true = rng.randn(10)

y = np.dot(X, w_true)
X_ds = get_dataset(X, order='fortran')
X_col_norm_sq = (X ** 2).sum(axis=0)

n_iter = 100


def _fit_linear(X, y, alpha, n_iter, loss, callback=None):
    n_samples, n_features = X.shape
    X_col_norm_sq = (X ** 2).sum(axis=0)
    X_ds = get_dataset(X, order='fortran')
    w_init = np.zeros(n_features)
    y_pred = np.zeros(n_samples)

    for _ in range(n_iter):
        viol = _cd_linear_epoch(w_init, X_ds, y, y_pred, X_col_norm_sq,
                                alpha, loss)
        if callback is not None:
            callback(w_init, viol)
    return w_init
Example #18
from nose.tools import assert_less_equal, assert_greater_equal
from numpy.testing import assert_array_almost_equal

import numpy as np

from polylearn.cd_linear_fast import _cd_linear_epoch
from polylearn.loss_fast import Squared, SquaredHinge, Logistic
from lightning.impl.dataset_fast import get_dataset

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
w_true = rng.randn(10)

y = np.dot(X, w_true)

X_ds = get_dataset(X, order='fortran')
X_col_norm_sq = (X ** 2).sum(axis=0)

n_iter = 100


def test_cd_linear_fit():
    loss = Squared()
    alpha = 1e-5
    w = np.zeros_like(w_true)
    y_pred = np.zeros(X.shape[0])
    loss_vals = []

    for _ in range(n_iter):
        _cd_linear_epoch(w, X_ds, y, y_pred, X_col_norm_sq, alpha,
                         loss)