def test_compute():
    a = delayed(1) + 5
    b = a + 1
    c = a + 2
    assert compute(b, c) == (7, 8)
    assert compute(b) == (7,)
    assert compute([a, b], c) == ([6, 7], 8)
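# Illustrative sketch (not part of the test suite): compute() merges the
# graphs of several delayed values and evaluates them in one pass, so the
# shared intermediate `a` below is only computed once. Assumes the same
# `delayed` and `compute` imports used by the tests in this module.
def example_shared_compute():
    a = delayed(1) + 5         # shared intermediate
    b, c = a + 1, a + 2        # both depend on `a`
    return compute(b, c)       # single graph evaluation -> (7, 8)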
def test_nout():
    func = delayed(lambda x: (x, -x), nout=2, pure=True)
    x = func(1)
    assert len(x) == 2
    a, b = x
    assert compute(a, b) == (1, -1)
    assert a._length is None
    assert b._length is None
    pytest.raises(TypeError, lambda: len(a))
    pytest.raises(TypeError, lambda: list(a))

    pytest.raises(ValueError, lambda: delayed(add, nout=-1))
    pytest.raises(ValueError, lambda: delayed(add, nout=True))

    func = delayed(add, nout=None)
    a = func(1)
    assert a._length is None
    pytest.raises(TypeError, lambda: list(a))
    pytest.raises(TypeError, lambda: len(a))

    func = delayed(lambda x: (x,), nout=1, pure=True)
    x = func(1)
    assert len(x) == 1
    a, = x
    assert a.compute() == 1
    assert a._length is None
    pytest.raises(TypeError, lambda: len(a))

    func = delayed(lambda x: tuple(), nout=0, pure=True)
    x = func(1)
    assert len(x) == 0
    assert x.compute() == tuple()
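# Illustrative sketch (not part of the test suite): with nout set, a single
# delayed call can be tuple-unpacked into separate Delayed outputs, each of
# which can feed further lazy work before one final compute().
def example_nout_unpacking():
    minmax = delayed(lambda seq: (min(seq), max(seq)), nout=2, pure=True)
    lo, hi = minmax([3, 1, 4, 1, 5])   # two Delayed objects from one call
    spread = hi - lo                   # keep building on the unpacked outputs
    return spread.compute()            # -> 4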
def _fit(self, X, y, parameter_iterable):
    # Wrap the estimator so that fit/score calls build a lazy dask graph
    # instead of executing eagerly.
    estimator = from_sklearn(self.estimator)
    self.scorer_ = check_scoring(estimator, scoring=self.scoring)
    cv = check_cv(self.cv, X, y, classifier=is_classifier(estimator))
    n_folds = len(cv)
    X, y = check_X_y(X, y)

    # Slice each fold into (X_train, y_train, X_test, y_test) up front;
    # cv.split yields (train_idx, test_idx) index pairs, and materialising
    # the folds once lets them be shared across all parameter sets.
    train_test_sets = [(X[train], y[train], X[test], y[test])
                       for train, test in cv.split(X, y)]

    # Build one lazy fit-and-score task per (parameter set, fold) pair.
    tups = []
    parameters = []
    for params in parameter_iterable:
        est = estimator.set_params(**params)
        for X_train, y_train, X_test, y_test in train_test_sets:
            fit = est.fit(X_train, y_train, **self.fit_params)
            tups.append(score_and_n(self.scorer_, fit, X_test, y_test))
            parameters.append(params)

    # Compute all (score, n_samples) results in a single pass
    get = self.get or _globals['get'] or threaded.get
    scores, n_samples = zip(*compute(tups, get=get)[0])

    # Extract grid_scores and best parameters
    grid_scores = get_grid_scores(scores, parameters, n_samples,
                                  n_folds, self.iid)
    best = get_best(grid_scores)

    # Update attributes
    self.grid_scores_ = grid_scores
    self.best_params_ = best.parameters
    self.best_score_ = best.mean_validation_score

    # Refit on the full data if needed
    if self.refit:
        self.best_estimator_ = (estimator.set_params(**best.parameters)
                                         .fit(X, y, **self.fit_params)
                                         .compute(get=get))
    return self
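# Illustrative sketch (not part of the module above): how a GridSearchCV-like
# wrapper built on _fit would typically be driven. The class name
# DaskGridSearchCV and its constructor arguments (param_grid, cv) are
# hypothetical -- substitute whatever class in this project defines _fit and
# expands param_grid into the parameter_iterable it receives.
def example_grid_search_usage():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    search = DaskGridSearchCV(LogisticRegression(),            # hypothetical class
                              param_grid={'C': [0.1, 1.0, 10.0]},
                              cv=3)
    search.fit(X, y)    # builds the graph and runs the single compute() in _fit
    return search.best_params_, search.best_score_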