예제 #1
0
 def __init__(self):
     """Instantiate the wrapped estimator with default arguments.

     Older scikit-learn ``check_estimator`` versions require a no-args
     constructor, so the positional arguments appropriate for the class
     under test (``Est``) are looked up here and forwarded.
     """
     # Positional constructor arguments per supported estimator class.
     defaults = {
         LearnerEstimator: (OLS(), FoldIndex()),
         LayerEnsemble: (
             make_group(FoldIndex(), ESTIMATORS, PREPROCESSING),
         ),
         TransformerEstimator: (Scale(), FoldIndex()),
     }
     super(Tmp, self).__init__(*defaults[Est])
예제 #2
0
def test_data():
    """[Parallel | Learner] Test data"""
    idx = INDEXERS['subsemble']()
    lyr = Layer('layer-scorer').push(make_group(idx, ECM, None))
    # Attach the module-level scorer to every learner so fitting
    # produces score entries in the layer's data.
    for lr in lyr.learners:
        lr.scorer = scorer

    run(lyr, 'fit', X, y, return_preds=True)
    # Use the repr() builtin instead of calling __repr__ directly, and
    # avoid shadowing the builtin name with a local variable.
    data_repr = repr(lyr.data)
    assert lyr.raw_data
    assert isinstance(lyr.raw_data, list)
    assert isinstance(lyr.data, dict)
    assert data_repr
    assert 'score' in data_repr
예제 #3
0
from mlens.estimators import LearnerEstimator, TransformerEstimator, LayerEnsemble
from mlens.externals.sklearn.base import clone

# scikit-learn's estimator-compliance checker is an optional dependency:
# when it cannot be imported, flag it so dependent tests can be skipped.
try:
    from sklearn.utils.estimator_checks import check_estimator
    run_sklearn = True
except ImportError:
    check_estimator = None
    run_sklearn = False

# Test fixture: a small (25 x 4) problem with 3 folds/partitions for the
# 'stack' layout (the two booleans configure Data — presumably proba and
# shuffle flags; confirm against Data's signature).
data = Data('stack', False, True)
X, y = data.get_data((25, 4), 3)
# Ground-truth outputs to compare estimator results against:
# (F, wf) for the fit pass, (P, wp) for the predict pass.
(F, wf), (P, wp) = data.ground_truth(X, y)

# Class under test and a reference instance built from the shared
# estimator/preprocessing fixtures.
Est = LayerEnsemble
est = LayerEnsemble(make_group(FoldIndex(), ESTIMATORS, PREPROCESSING),
                    dtype=np.float64)


class Tmp(Est):
    """Temporary class

    Wrapper to get full estimator on no-args instantiation. For compatibility
    with older Scikit-learn versions.
    """
    def __init__(self):
        args = {
            LearnerEstimator: (OLS(), FoldIndex()),
            LayerEnsemble: (make_group(FoldIndex(), ESTIMATORS,
                                       PREPROCESSING), ),
            TransformerEstimator: (Scale(), FoldIndex())
예제 #4
0
To parallelize the implementation, we can use the :class:`Layer` class. A layer is
a handle that will run any number of :class:`Group` instances attached to it in parallel. Each
group in turn is a wrapper around an ``indexer-transformers-estimators`` triplet.

Basics
------

So, to fit our two learners in parallel, we first need a :class:`Group` object to
handle them.
"""
from mlens.parallel import Layer, Group, make_group, run
from mlens.utils.dummy import OLS, Scale
from mlens.index import FoldIndex

# Two-fold cross-validation indexer driving two OLS learners with no
# preprocessing (the integer arguments are presumably the dummy OLS
# offsets — confirm against mlens.utils.dummy.OLS).
indexer = FoldIndex(folds=2)
group = make_group(indexer, [OLS(1), OLS(2)], None)

############################################################################
# This ``group`` object is now a complete description of how to fit our two
# learners using the prescribed indexing method.
#
# To train the estimators, we need to feed the group to a :class:`Layer` instance:

import numpy as np

# Fixed seed so the random targets are reproducible across runs.
np.random.seed(2)

# Toy data: 10 samples with 2 features each, random regression targets.
X = np.arange(20).reshape(10, 2)
y = np.random.rand(10)

layer = Layer(stack=group)
예제 #5
0
# Second probabilistic-estimator fixture: two preprocessing cases
# ('sc2', 'no2'), each pairing an offset LogisticRegression with a
# default one (dummy estimators, not scikit-learn's).
ESTIMATORS_PROBA_2 = {'sc2': [('offs2', LogisticRegression(offset=2)),
                             ('null2', LogisticRegression())],
                      'no2': [('offs2', LogisticRegression(offset=2)),
                             ('null2', LogisticRegression())]}


def scorer(p, y): return np.mean(p - y)


# Shared fixture: (25 x 4) problem with 3 folds for the 'stack' layout
# (the two booleans configure Data — presumably proba and shuffle flags;
# confirm against Data's signature).
data = Data('stack', True, True)

X, y = data.get_data((25, 4), 3)

# Group 1: stack indexer with probability outputs from the learners.
idx1 = INDEXERS['stack']()
g1 = make_group(
    idx1, ESTIMATORS_PROBA_1, PREPROCESSING_1,
    learner_kwargs={'proba': True, 'verbose': True},
    transformer_kwargs={'verbose': True})

# Group 2: subsemble indexer with plain (non-probability) outputs.
idx2 = INDEXERS['subsemble']()
g2 = make_group(
    idx2, ESTIMATORS_PROBA_2, PREPROCESSING_2,
    learner_kwargs={'proba': False, 'verbose': True},
    transformer_kwargs={'verbose': True})

# Layer under test; the tests below push the groups onto it.
layer = Layer('layer')


def test_push_1():
    """[Parallel | Layer] Testing single push"""
    assert not layer.__stack__