# --- fragment: tail of a generator that yields estimator instances ---
# NOTE(review): the enclosing function and the branches preceding this `else`
# are outside this chunk; the indentation below is a best-guess
# reconstruction — confirm against the full file.
        else:
            # Default branch: the class is instantiable with no arguments.
            inst = obj()
        yield inst


# Build the cartesian product of (estimator, generic check) pairs, one pytest
# case each, with a readable id of the form "<estimator>:<check name>".
# NOTE(review): `pytest`, `copy` and the creme submodules used below are
# presumably imported above this chunk — confirm.
@pytest.mark.parametrize('estimator, check', [
    pytest.param(
        # Deep copy so one check cannot leak fitted state into another.
        copy.deepcopy(estimator), check,
        id=f'{estimator}:{check.__name__}')
    # All auto-discovered estimators, plus hand-picked instances and
    # pipelines whose constructors need explicit arguments.
    for estimator in list(get_all_estimators()) + [
        feature_extraction.TFIDF(),
        linear_model.LogisticRegression(),
        preprocessing.StandardScaler() | linear_model.LinearRegression(),
        preprocessing.StandardScaler() | linear_model.PAClassifier(),
        preprocessing.StandardScaler() | multiclass.OneVsRestClassifier(linear_model.LogisticRegression()),
        preprocessing.StandardScaler() | multiclass.OneVsRestClassifier(linear_model.PAClassifier()),
        naive_bayes.GaussianNB(),
        preprocessing.StandardScaler(),
        cluster.KMeans(n_clusters=5, seed=42),
        preprocessing.MinMaxScaler(),
        preprocessing.MinMaxScaler() + preprocessing.StandardScaler(),
        preprocessing.PolynomialExtender(),
        feature_selection.VarianceThreshold(),
        feature_selection.SelectKBest(similarity=stats.PearsonCorrelation())
    ]
    # Each estimator gets every check that applies to it.
    for check in utils.estimator_checks.yield_checks(estimator)
])
def test_check_estimator(estimator, check):
    """Each parametrized (estimator, check) pair must pass its check."""
    check(estimator)
from sklearn.utils import estimator_checks

from creme import cluster
from creme import compat
from creme import feature_selection
# NOTE(review): `linear_model` and `multiclass` are not used in this chunk —
# they may be used elsewhere in the file; confirm before removing.
from creme import linear_model
from creme import multiclass
from creme import naive_bayes
from creme import preprocessing
from creme import stats
from creme import utils

# Estimators whose scikit-learn conversions are run through sklearn's own
# estimator checks below.
ESTIMATORS = [
    naive_bayes.GaussianNB(),
    preprocessing.StandardScaler(),
    cluster.KMeans(random_state=42),
    preprocessing.MinMaxScaler(),
    preprocessing.MinMaxScaler() + preprocessing.StandardScaler(),
    preprocessing.PolynomialExtender(),
    feature_selection.VarianceThreshold(),
    feature_selection.SelectKBest(similarity=stats.PearsonCorrelation())
]


# NOTE(review): `pytest` and `copy` are presumably imported above this chunk —
# confirm against the full file.
@pytest.mark.parametrize('estimator', [
    # Deep copy so a test run cannot mutate the shared module-level instance.
    pytest.param(copy.deepcopy(estimator), id=str(estimator))
    for estimator in ESTIMATORS
])
def test_sklearn_check_estimator(estimator):
    """Converted creme estimators must satisfy sklearn's estimator checks."""
    estimator_checks.check_estimator(
        compat.convert_creme_to_sklearn(estimator))
# --- fragment: tail of a generator that yields estimator instances ---
# NOTE(review): the enclosing function and the branches preceding this `else`
# are outside this chunk; the indentation below is a best-guess
# reconstruction — confirm against the full file.
        else:
            # Default branch: the class is instantiable with no arguments.
            inst = obj()
        yield inst


# One pytest case per estimator, identified by its string representation.
# NOTE(review): `pytest`, `copy` and the creme submodules used below are
# presumably imported above this chunk — confirm.
@pytest.mark.parametrize('estimator', [
    # Deep copy so one test run cannot leak fitted state into another.
    pytest.param(copy.deepcopy(estimator), id=str(estimator))
    # All auto-discovered estimators, plus hand-picked instances and
    # pipelines whose constructors need explicit arguments.
    for estimator in list(get_all_estimators()) + [
        feature_extraction.TFIDFVectorizer(),
        linear_model.LogisticRegression(),
        preprocessing.StandardScaler() | linear_model.LinearRegression(),
        preprocessing.StandardScaler() | linear_model.PAClassifier(),
        preprocessing.StandardScaler() | multiclass.OneVsRestClassifier(linear_model.LogisticRegression()),
        preprocessing.StandardScaler() | multiclass.OneVsRestClassifier(linear_model.PAClassifier()),
        naive_bayes.GaussianNB(),
        preprocessing.StandardScaler(),
        cluster.KMeans(n_clusters=5, random_state=42),
        preprocessing.MinMaxScaler(),
        preprocessing.MinMaxScaler() + preprocessing.StandardScaler(),
        preprocessing.PolynomialExtender(),
        feature_selection.VarianceThreshold(),
        feature_selection.SelectKBest(similarity=stats.PearsonCorrelation())
    ]
])
def test_check_estimator(estimator):
    """Every estimator must pass creme's generic estimator checks."""
    utils.check_estimator(estimator)
import pytest
from sklearn import linear_model as sk_linear_model
from sklearn.utils import estimator_checks

from creme import base
from creme import cluster
from creme import compat
from creme import linear_model
from creme import preprocessing

# Creme estimators that have a scikit-learn counterpart via the compat layer.
_CONVERTIBLE = [
    linear_model.LinearRegression(),
    linear_model.LogisticRegression(),
    preprocessing.StandardScaler(),
    cluster.KMeans(seed=42)
]


@pytest.mark.parametrize(
    'estimator',
    [pytest.param(est, id=str(est)) for est in _CONVERTIBLE]
)
def test_creme_to_sklearn_check_estimator(estimator: base.Estimator):
    """A wrapped creme estimator must satisfy sklearn's estimator checks."""
    estimator_checks.check_estimator(compat.convert_creme_to_sklearn(estimator))


def test_sklearn_check_twoway():
    """A sklearn estimator still passes after a sklearn -> creme -> sklearn round trip."""
    round_tripped = compat.convert_creme_to_sklearn(
        compat.convert_sklearn_to_creme(sk_linear_model.SGDRegressor())
    )
    estimator_checks.check_estimator(round_tripped)
import pickle  # NOTE(review): unused in this chunk — confirm before removing
import json
import ast     # NOTE(review): unused in this chunk — confirm before removing

# Fix: `cluster.KMeans` below was a NameError — the module was never imported.
from creme import cluster

# Read the one-off configuration object from the first line of stdin.
config = json.loads(input())
savePath = config['savePath']  # NOTE(review): read but never used — the model is never persisted
n_clusters = config['n_clusters']
halflife = config['halflife']
sigma = config['sigma']
mu = config['mu']
random_state = config['randomState']

# Incremental (online) k-means clusterer, configured from the request above.
k_means = cluster.KMeans(
    n_clusters=n_clusters,
    halflife=halflife,
    mu=mu,
    sigma=sigma,
    random_state=random_state,
)

output = {}
while True:
    # Wait for the next request: one JSON-encoded feature dict per line.
    data = input()
    Xi = json.loads(data)
    # Predict first, then learn from the observation (test-then-train).
    output["Predict"] = k_means.predict_one(Xi)
    model = k_means.fit_one(Xi)
    # Fix: the prediction was computed but never emitted; write the response
    # to stdout so the caller of this request/response loop can read it.
    print(json.dumps(output), flush=True)