Example #1
 def test_make_choice_with_instance(self):
     from lale.lib.lale import NoOp
     from lale.lib.sklearn import (PCA, KNeighborsClassifier,
                                   LogisticRegression, Nystroem, OneHotEncoder)
     from lale.operators import make_choice
     from sklearn.datasets import load_iris
     iris = load_iris()
     X, y = iris.data, iris.target
     tfm = PCA() | Nystroem() | NoOp()
     # an unresolved operator choice is not trainable, so calling fit raises AttributeError
     with self.assertRaises(AttributeError):
         trained = tfm.fit(X, y)
     # planned pipelines built with the | and >> combinators and with make_choice
     planned_pipeline1 = ((OneHotEncoder | NoOp) >> tfm
                          >> (LogisticRegression | KNeighborsClassifier))
     planned_pipeline2 = ((OneHotEncoder | NoOp) >> (PCA | Nystroem)
                          >> (LogisticRegression | KNeighborsClassifier))
     planned_pipeline3 = (make_choice(OneHotEncoder, NoOp)
                          >> make_choice(PCA, Nystroem)
                          >> make_choice(LogisticRegression, KNeighborsClassifier))
Example #2
 def test_pipeline_2(self):
     from lale.lib.lale import NoOp
     from lale.lib.sklearn import Nystroem
     from lale.lib.sklearn import PCA
     from lale.lib.sklearn import LogisticRegression
     from lale.lib.sklearn import KNeighborsClassifier
     from lale.operators import make_choice, make_pipeline
     from lale.json_operator import to_json, from_json
     kernel_tfm_or_not = make_choice(NoOp, Nystroem)
     tfm = PCA
     clf = make_choice(LogisticRegression, KNeighborsClassifier)
     operator = make_pipeline(kernel_tfm_or_not, tfm, clf)
     # a round trip through JSON should reproduce the same representation
     json = to_json(operator)
     operator_2 = from_json(json)
     json_2 = to_json(operator_2)
     self.assertEqual(json, json_2)
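
The to_json / from_json pair used above produces a plain dict intended for JSON serialization, so a planned search space can be persisted and restored. A minimal standalone sketch, assuming lale and scikit-learn are installed and that the dict survives a round trip through the standard json module:

# sketch: serialize a planned lale pipeline to a JSON string and rebuild it
import json

from lale.json_operator import from_json, to_json
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA, LogisticRegression, Nystroem
from lale.operators import make_choice, make_pipeline

planned = make_pipeline(make_choice(NoOp, Nystroem), PCA, LogisticRegression)
text = json.dumps(to_json(planned))   # assumes the dict is directly JSON-serializable
restored = from_json(json.loads(text))
assert to_json(restored) == to_json(planned)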
Example #3
File: grammar.py  Project: krprls/lale
 def _unfold(self, op: Operator, n: int) -> Optional[Operator]:
     """Unroll all possible operators from the grammar, starting from non-terminal `op`, after `n` derivations.

     Parameters
     ----------
     op : Operator
         starting rule (e.g., `g.start`)
     n : int
         number of derivations

     Returns
     -------
     Optional[Operator]
     """
     if isinstance(op, BasePipeline):
         # unfold every step; drop the whole pipeline if any step cannot be unfolded
         steps = op.steps()
         new_steps = [self._unfold(sop, n) for sop in steps]
         step_map = {steps[i]: new_steps[i] for i in range(len(steps))}
         new_edges = [(step_map[s], step_map[d]) for s, d in op.edges()]
         if None not in new_steps:
             return get_pipeline_of_applicable_type(new_steps, new_edges, True)
         return None
     if isinstance(op, OperatorChoice):
         # keep only the alternatives that could be unfolded
         steps = [s for s in (self._unfold(sop, n) for sop in op.steps()) if s]
         return make_choice(*steps) if steps else None
     if isinstance(op, NonTerminal):
         # expand the non-terminal's rule, spending one derivation
         return self._unfold(self._variables[op.name()], n - 1) if n > 0 else None
     if isinstance(op, IndividualOp):
         # individual operators are terminals and are returned unchanged
         return op
     assert False, f"Unknown operator {op}"
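
A hedged usage sketch for the helper above: it assumes that lale.grammar.Grammar lets rules be assigned as attributes (right-hand sides mixing individual operators, choices, and non-terminals) and that a public unfold(n) starts from g.start and delegates to _unfold. Treat those API details as assumptions inferred from this file.

# hypothetical grammar, unfolded into a planned operator after a bounded number of derivations
from lale.grammar import Grammar
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA, KNeighborsClassifier, LogisticRegression, Nystroem

g = Grammar()
g.start = g.prep >> g.clf                           # starting rule
g.prep = NoOp | PCA | Nystroem                      # preprocessing alternatives
g.clf = LogisticRegression | KNeighborsClassifier   # classifier alternatives

planned = g.unfold(2)    # two derivations: expand start, then prep and clf
if planned is not None:  # _unfold returns None when n is too small
    planned.visualize(ipython_display=False)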
Example #4
    def test_make_choice_with_instance(self):
        from sklearn.datasets import load_iris

        from lale.lib.lale import NoOp
        from lale.lib.sklearn import (
            PCA,
            KNeighborsClassifier,
            LogisticRegression,
            Nystroem,
            OneHotEncoder,
        )
        from lale.operators import make_choice

        iris = load_iris()
        X, y = iris.data, iris.target
        tfm = PCA() | Nystroem() | NoOp()
        with self.assertRaises(AttributeError):
            # we are trying to trigger a runtime error here, so we ignore the static warning
            _ = tfm.fit(X, y)  # type: ignore
        _ = (OneHotEncoder | NoOp) >> tfm >> (LogisticRegression
                                              | KNeighborsClassifier)
        _ = ((OneHotEncoder | NoOp) >> (PCA | Nystroem) >>
             (LogisticRegression | KNeighborsClassifier))
        _ = (make_choice(OneHotEncoder, NoOp) >> make_choice(PCA, Nystroem) >>
             make_choice(LogisticRegression, KNeighborsClassifier))
Example #5
 def test_with_operator_choice(self):
     from lale.operators import make_choice
     from lale.lib.lale import NoOp
     from lale.lib.sklearn import KNeighborsClassifier
     from lale.lib.sklearn import LogisticRegression
     from lale.lib.sklearn import Nystroem
     from lale.lib.sklearn import PCA
     kernel_tfm_or_not = NoOp | Nystroem
     tfm = PCA
     clf = make_choice(LogisticRegression, KNeighborsClassifier)
     clf.visualize(ipython_display=False)
     optimizable = kernel_tfm_or_not >> tfm >> clf
     optimizable.visualize(ipython_display=False)
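
Besides visualize, lale operators can also be rendered back to Python combinator source with pretty_print; a small sketch, assuming the default ipython_display=False, which returns the source as a string:

# sketch: print the planned pipeline as Python source instead of drawing a graph
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA, KNeighborsClassifier, LogisticRegression, Nystroem

optimizable = (NoOp | Nystroem) >> PCA >> (LogisticRegression | KNeighborsClassifier)
print(optimizable.pretty_print())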
Example #6
def test_2_steps_regressor():
    # kls (name, operator) pairs, transformers, regressors, base_test, and
    # load_regression are defined elsewhere in the test module
    T = make_choice(*[Op for (n, Op) in kls if n in transformers])
    R = make_choice(*[Op for (n, Op) in kls if n in regressors])
    base_test("transformer_regressor", T >> R, load_regression, scoring="r2")
Example #7
def test_2_steps_classifier():
    # kls, transformers, classifiers, base_test, and load_iris are likewise
    # defined elsewhere in the test module
    T = make_choice(*[Op for (n, Op) in kls if n in transformers])
    C = make_choice(*[Op for (n, Op) in kls if n in classifiers])
    base_test("transformer_classifier", T >> C, load_iris)