def test_model_stacking_fit_transform():
    model_stacking = Pipeline([
        ModelStacking(
            [
                SKLearnWrapper(
                    GradientBoostingRegressor(),
                    HyperparameterSpace({
                        "n_estimators": RandInt(50, 600),
                        "max_depth": RandInt(1, 10),
                        "learning_rate": LogUniform(0.07, 0.7)
                    })),
                SKLearnWrapper(
                    KMeans(),
                    HyperparameterSpace({"n_clusters": RandInt(5, 10)})),
            ],
            joiner=NumpyTranspose(),
            judge=SKLearnWrapper(
                Ridge(),
                HyperparameterSpace({
                    "alpha": LogUniform(0.7, 1.4),
                    "fit_intercept": Boolean()
                })),
        )
    ])
    expected_outputs_shape = (379, 1)
    data_inputs_shape = (379, 13)
    data_inputs = _create_data(data_inputs_shape)
    expected_outputs = _create_data(expected_outputs_shape)

    model_stacking, outputs = model_stacking.fit_transform(
        data_inputs, expected_outputs)

    assert outputs.shape == expected_outputs_shape
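# A minimal sketch, not part of the original test module: one plausible implementation of
# the `_create_data` helper used above, assuming it only needs random floats of the
# requested shape for this smoke test (the test checks output shapes, not scores).
import numpy as np

def _create_data(shape):
    # Random data is enough here since only shapes are asserted.
    return np.random.random(shape).astype(np.float32)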
def test_hyperparam_space():
    p = Pipeline([
        AddFeatures([
            SomeStep(hyperparams_space=HyperparameterSpace({"n_components": RandInt(1, 5)})),
            SomeStep(hyperparams_space=HyperparameterSpace({"n_components": RandInt(1, 5)}))
        ]),
        ModelStacking([
            SomeStep(hyperparams_space=HyperparameterSpace({"n_estimators": RandInt(1, 1000)})),
            SomeStep(hyperparams_space=HyperparameterSpace({"n_estimators": RandInt(1, 1000)})),
            SomeStep(hyperparams_space=HyperparameterSpace({"max_depth": RandInt(1, 100)})),
            SomeStep(hyperparams_space=HyperparameterSpace({"max_depth": RandInt(1, 100)}))
        ],
            joiner=NumpyTranspose(),
            judge=SomeStep(hyperparams_space=HyperparameterSpace({"alpha": LogUniform(0.1, 10.0)}))
        )
    ])

    rvsed = p.get_hyperparams_space().rvs()  # sample concrete values from the space
    p.set_hyperparams(rvsed)

    hyperparams = p.get_hyperparams()

    assert "AddFeatures" in hyperparams.keys()
    assert "SomeStep" in hyperparams["AddFeatures"]
    assert "n_components" in hyperparams["AddFeatures"]["SomeStep"]
    assert "SomeStep1" in hyperparams["AddFeatures"]
    assert "n_components" in hyperparams["AddFeatures"]["SomeStep1"]
    assert "SomeStep" in hyperparams["ModelStacking"]
    assert "n_estimators" in hyperparams["ModelStacking"]["SomeStep"]
    assert "SomeStep1" in hyperparams["ModelStacking"]
    assert "max_depth" in hyperparams["ModelStacking"]["SomeStep2"]
def __init__(self, brothers):
    super().__init__(
        brothers,
        SKLearnWrapper(
            Ridge(),
            HyperparameterSpace({
                "alpha": LogUniform(0.1, 10.0),
                "fit_intercept": Boolean()
            })),
        joiner=NumpyTranspose())
def create_model_step():
    return TensorflowV1ModelStep(
        create_graph=create_graph,
        create_loss=create_loss,
        create_optimizer=create_optimizer,
        has_expected_outputs=True
    ).set_hyperparams(HyperparameterSamples({
        'learning_rate': 0.01
    })).set_hyperparams_space(HyperparameterSpace({
        'learning_rate': LogUniform(0.0001, 0.01)
    }))
def test_hyperparam_space():
    p = Pipeline([
        AddFeatures([
            SomeStep(hyperparams_space=HyperparameterSpace(
                {"n_components": RandInt(1, 5)})),
            SomeStep(hyperparams_space=HyperparameterSpace(
                {"n_components": RandInt(1, 5)}))
        ]),
        ModelStacking([
            SomeStep(hyperparams_space=HyperparameterSpace(
                {"n_estimators": RandInt(1, 1000)})),
            SomeStep(hyperparams_space=HyperparameterSpace(
                {"n_estimators": RandInt(1, 1000)})),
            SomeStep(hyperparams_space=HyperparameterSpace(
                {"max_depth": RandInt(1, 100)})),
            SomeStep(hyperparams_space=HyperparameterSpace(
                {"max_depth": RandInt(1, 100)}))
        ],
            joiner=NumpyTranspose(),
            judge=SomeStep(hyperparams_space=HyperparameterSpace(
                {"alpha": LogUniform(0.1, 10.0)})))
    ])

    rvsed = p.get_hyperparams_space().rvs()  # sample concrete values from the space
    p.set_hyperparams(rvsed)

    hyperparams = p.get_hyperparams()
    flat_hyperparams_keys = hyperparams.to_flat_dict().keys()

    assert 'AddFeatures' in hyperparams
    assert 'SomeStep' in hyperparams["AddFeatures"]
    assert "n_components" in hyperparams["AddFeatures"]["SomeStep"]
    assert 'SomeStep1' in hyperparams["AddFeatures"]
    assert "n_components" in hyperparams["AddFeatures"]["SomeStep1"]
    assert 'ModelStacking' in hyperparams
    assert 'SomeStep' in hyperparams["ModelStacking"]
    assert 'n_estimators' in hyperparams["ModelStacking"]["SomeStep"]
    assert 'SomeStep1' in hyperparams["ModelStacking"]
    assert 'n_estimators' in hyperparams["ModelStacking"]["SomeStep1"]
    assert 'SomeStep2' in hyperparams["ModelStacking"]
    assert 'max_depth' in hyperparams["ModelStacking"]["SomeStep2"]
    assert 'SomeStep3' in hyperparams["ModelStacking"]
    assert 'max_depth' in hyperparams["ModelStacking"]["SomeStep3"]
    assert 'AddFeatures__SomeStep1__n_components' in flat_hyperparams_keys
    assert 'AddFeatures__SomeStep__n_components' in flat_hyperparams_keys
    assert 'ModelStacking__SomeStep__n_estimators' in flat_hyperparams_keys
    assert 'ModelStacking__SomeStep1__n_estimators' in flat_hyperparams_keys
    assert 'ModelStacking__SomeStep2__max_depth' in flat_hyperparams_keys
    assert 'ModelStacking__SomeStep3__max_depth' in flat_hyperparams_keys
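# A hedged usage sketch (an assumption, not part of the original test): the same flat,
# double-underscore-separated keys asserted above can also be used to assign values
# directly through set_hyperparams with a HyperparameterSamples.
p.set_hyperparams(HyperparameterSamples({
    'AddFeatures__SomeStep__n_components': 3,
    'ModelStacking__SomeStep__n_estimators': 100,
}))
# The nested view is then expected to reflect the flat assignment (assumed behaviour):
assert p.get_hyperparams()['AddFeatures']['SomeStep']['n_components'] == 3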
# pipeline using a flat dict or a nested dict.
p = Pipeline([
    AddFeatures([
        SKLearnWrapper(PCA(n_components=2),
                       HyperparameterSpace({"n_components": RandInt(1, 3)})),
        SKLearnWrapper(FastICA(n_components=2),
                       HyperparameterSpace({"n_components": RandInt(1, 3)})),
    ]),
    ModelStacking(
        [
            SKLearnWrapper(
                GradientBoostingRegressor(),
                HyperparameterSpace({
                    "n_estimators": RandInt(50, 600),
                    "max_depth": RandInt(1, 10),
                    "learning_rate": LogUniform(0.07, 0.7)
                })),
            SKLearnWrapper(
                GradientBoostingRegressor(),
                HyperparameterSpace({
                    "n_estimators": RandInt(50, 600),
                    "max_depth": RandInt(1, 10),
                    "learning_rate": LogUniform(0.07, 0.7)
                })),
            SKLearnWrapper(
                GradientBoostingRegressor(),
                HyperparameterSpace({
                    "n_estimators": RandInt(50, 600),
                    "max_depth": RandInt(1, 10),
                    "learning_rate": LogUniform(0.07, 0.7)
                })),
def main():
    boston = load_boston()
    X, y = shuffle(boston.data, boston.target, random_state=13)
    X = X.astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, shuffle=False)

    # Note that the hyperparameter spaces are defined here, during the pipeline definition. They could also be set
    # within the classes at their definition if using custom classes, or defined after declaring the pipeline
    # using a flat dict or a nested dict (see the sketch after this function).
    p = Pipeline([
        AddFeatures([
            SKLearnWrapper(
                PCA(n_components=2),
                HyperparameterSpace({"n_components": RandInt(1, 3)})),
            SKLearnWrapper(
                FastICA(n_components=2),
                HyperparameterSpace({"n_components": RandInt(1, 3)})),
        ]),
        ModelStacking(
            [
                SKLearnWrapper(
                    GradientBoostingRegressor(),
                    HyperparameterSpace({
                        "n_estimators": RandInt(50, 600),
                        "max_depth": RandInt(1, 10),
                        "learning_rate": LogUniform(0.07, 0.7)
                    })),
                SKLearnWrapper(
                    KMeans(),
                    HyperparameterSpace({"n_clusters": RandInt(5, 10)})),
            ],
            joiner=NumpyTranspose(),
            judge=SKLearnWrapper(
                Ridge(),
                HyperparameterSpace({
                    "alpha": LogUniform(0.7, 1.4),
                    "fit_intercept": Boolean()
                })),
        )
    ])

    print("Meta-fitting on train:")
    p = p.meta_fit(X_train, y_train,
                   metastep=RandomSearch(
                       n_iter=10,
                       higher_score_is_better=True,
                       validation_technique=KFoldCrossValidationWrapper(
                           scoring_function=r2_score, k_fold=10)))
    # Here is an alternative way to do it, more "pipeliney":
    # p = RandomSearch(
    #     p,
    #     n_iter=15,
    #     higher_score_is_better=True,
    #     validation_technique=KFoldCrossValidation(scoring_function=r2_score, k_fold=3)
    # ).fit(X_train, y_train)

    print("")
    print("Transforming train and test:")
    y_train_predicted = p.predict(X_train)
    y_test_predicted = p.predict(X_test)

    print("")
    print("Evaluating transformed train:")
    score_transform = r2_score(y_train_predicted, y_train)
    print('R2 regression score:', score_transform)

    print("")
    print("Evaluating transformed test:")
    score_test = r2_score(y_test_predicted, y_test)
    print('R2 regression score:', score_test)
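# Sketch of the alternative mentioned in the comment above (an assumption, not part of the
# example): the spaces could also be assigned after declaring the pipeline, either as a
# nested dict or as a flat dict of double-underscore-separated keys. The step name
# 'SKLearnWrapper_PCA' is the auto-generated wrapper name assumed here.
p.set_hyperparams_space(HyperparameterSpace({
    'AddFeatures': {
        'SKLearnWrapper_PCA': {'n_components': RandInt(1, 3)},
    },
}))
# Equivalent flat-dict form (assumed key layout):
p.set_hyperparams_space(HyperparameterSpace({
    'AddFeatures__SKLearnWrapper_PCA__n_components': RandInt(1, 3),
}))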
def main():
    def accuracy(data_inputs, expected_outputs):
        return np.mean(
            np.argmax(np.array(data_inputs), axis=1) == np.argmax(np.array(expected_outputs), axis=1))

    # load the dataset
    df = read_csv('data/winequality-white.csv', sep=';')
    data_inputs = df.values
    data_inputs[:, -1] = data_inputs[:, -1] - 1
    n_features = data_inputs.shape[1] - 1
    n_classes = 10

    p = Pipeline([
        TrainOnlyWrapper(DataShuffler()),
        ColumnTransformerInputOutput(
            input_columns=[(
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                ToNumpy(np.float32)
            )],
            output_columns=[(11, Identity())]
        ),
        OutputTransformerWrapper(PlotDistribution(column=-1)),
        MiniBatchSequentialPipeline([
            Tensorflow2ModelStep(
                create_model=create_model,
                create_loss=create_loss,
                create_optimizer=create_optimizer
            ).set_hyperparams(HyperparameterSamples({
                'n_dense_layers': 2,
                'input_dim': n_features,
                'optimizer': 'adam',
                'activation': 'relu',
                'kernel_initializer': 'he_uniform',
                'learning_rate': 0.01,
                'hidden_dim': 20,
                'n_classes': 3
            })).set_hyperparams_space(HyperparameterSpace({
                'n_dense_layers': RandInt(2, 4),
                'hidden_dim_layer_multiplier': Uniform(0.30, 1),
                'input_dim': FixedHyperparameter(n_features),
                'optimizer': Choice([
                    OPTIMIZERS.ADAM.value,
                    OPTIMIZERS.SGD.value,
                    OPTIMIZERS.ADAGRAD.value
                ]),
                'activation': Choice([
                    ACTIVATIONS.RELU.value,
                    ACTIVATIONS.TANH.value,
                    ACTIVATIONS.SIGMOID.value,
                    ACTIVATIONS.ELU.value,
                ]),
                'kernel_initializer': Choice([
                    KERNEL_INITIALIZERS.GLOROT_NORMAL.value,
                    KERNEL_INITIALIZERS.GLOROT_UNIFORM.value,
                    KERNEL_INITIALIZERS.HE_UNIFORM.value
                ]),
                'learning_rate': LogUniform(0.005, 0.01),
                'hidden_dim': RandInt(3, 80),
                'n_classes': FixedHyperparameter(n_classes)
            }))
        ], batch_size=33),
        OutputTransformerWrapper(Pipeline([
            ExpandDim(),
            OneHotEncoder(nb_columns=n_classes, name='classes')
        ]))
    ])

    auto_ml = AutoML(
        pipeline=p,
        hyperparams_repository=InMemoryHyperparamsRepository(cache_folder='trials'),
        hyperparams_optimizer=RandomSearchHyperparameterSelectionStrategy(),
        validation_splitter=ValidationSplitter(test_size=0.30),
        scoring_callback=ScoringCallback(accuracy, higher_score_is_better=True),
        callbacks=[
            MetricCallback(
                name='classification_report_imbalanced_metric',
                metric_function=classificaiton_report_imbalanced_metric,
                higher_score_is_better=True),
            MetricCallback(name='f1', metric_function=f1_score_weighted,
                           higher_score_is_better=True),
            MetricCallback(name='recall', metric_function=recall_score_weighted,
                           higher_score_is_better=True),
            MetricCallback(name='precision', metric_function=precision_score_weighted,
                           higher_score_is_better=True),
            EarlyStoppingCallback(max_epochs_without_improvement=3)
        ],
        n_trials=200,
        refit_trial=True,
        epochs=75)

    auto_ml = auto_ml.fit(data_inputs=data_inputs)
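# Hypothetical helpers (assumptions, not shown in the example above): the weighted metric
# functions referenced by the MetricCallback instances could wrap scikit-learn's weighted
# scores, converting one-hot pipeline outputs back to class indices first, following the
# same (predictions, expected_outputs) argument order as the accuracy function above.
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score

def f1_score_weighted(data_inputs, expected_outputs):
    y_pred = np.argmax(np.array(data_inputs), axis=1)
    y_true = np.argmax(np.array(expected_outputs), axis=1)
    return f1_score(y_true, y_pred, average='weighted')

def recall_score_weighted(data_inputs, expected_outputs):
    y_pred = np.argmax(np.array(data_inputs), axis=1)
    y_true = np.argmax(np.array(expected_outputs), axis=1)
    return recall_score(y_true, y_pred, average='weighted')

def precision_score_weighted(data_inputs, expected_outputs):
    y_pred = np.argmax(np.array(data_inputs), axis=1)
    y_true = np.argmax(np.array(expected_outputs), axis=1)
    return precision_score(y_true, y_pred, average='weighted')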
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': Quantized(hd=Uniform(0, 10)),
        })),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': Quantized(hd=Uniform(0, 10)),
        }))
    ])),
    (3.5, Pipeline([
        FitTransformCallbackStep().set_name('callback'),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': LogUniform(min_included=1.0, max_included=5.0),
        })),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': LogUniform(min_included=1.0, max_included=5.0),
        }))
    ])),
    (3.5, Pipeline([
        FitTransformCallbackStep().set_name('callback'),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': LogNormal(log2_space_mean=1.0, log2_space_std=0.5),
        })),
def main():
    # Define classification models, and hyperparams.
    # See also HyperparameterSpace documentation:
    # https://www.neuraxle.org/stable/api/neuraxle.hyperparams.space.html#neuraxle.hyperparams.space.HyperparameterSpace

    decision_tree_classifier = SKLearnWrapper(
        DecisionTreeClassifier(),
        HyperparameterSpace({
            'criterion': Choice(['gini', 'entropy']),
            'splitter': Choice(['best', 'random']),
            'min_samples_leaf': RandInt(2, 5),
            'min_samples_split': RandInt(2, 4)
        }))

    extra_tree_classifier = SKLearnWrapper(
        ExtraTreeClassifier(),
        HyperparameterSpace({
            'criterion': Choice(['gini', 'entropy']),
            'splitter': Choice(['best', 'random']),
            'min_samples_leaf': RandInt(2, 5),
            'min_samples_split': RandInt(2, 4)
        }))

    ridge_classifier = Pipeline([
        OutputTransformerWrapper(NumpyRavel()),
        SKLearnWrapper(
            RidgeClassifier(),
            HyperparameterSpace({
                'alpha': Choice([0.0, 1.0, 10.0, 100.0]),
                'fit_intercept': Boolean(),
                'normalize': Boolean()
            }))
    ]).set_name('RidgeClassifier')

    logistic_regression = Pipeline([
        OutputTransformerWrapper(NumpyRavel()),
        SKLearnWrapper(
            LogisticRegression(),
            HyperparameterSpace({
                'C': LogUniform(0.01, 10.0),
                'fit_intercept': Boolean(),
                'penalty': Choice(['none', 'l2']),
                'max_iter': RandInt(20, 200)
            }))
    ]).set_name('LogisticRegression')

    random_forest_classifier = Pipeline([
        OutputTransformerWrapper(NumpyRavel()),
        SKLearnWrapper(
            RandomForestClassifier(),
            HyperparameterSpace({
                'n_estimators': RandInt(50, 600),
                'criterion': Choice(['gini', 'entropy']),
                'min_samples_leaf': RandInt(2, 5),
                'min_samples_split': RandInt(2, 4),
                'bootstrap': Boolean()
            }))
    ]).set_name('RandomForestClassifier')

    # Define a classification pipeline that lets the AutoML loop choose one of the classifiers.
    # See also ChooseOneStepOf documentation:
    # https://www.neuraxle.org/stable/api/neuraxle.steps.flow.html#neuraxle.steps.flow.ChooseOneStepOf
    pipeline = Pipeline([
        ChooseOneStepOf([
            decision_tree_classifier,
            extra_tree_classifier,
            ridge_classifier,
            logistic_regression,
            random_forest_classifier
        ])
    ])

    # Create the AutoML loop object.
    # See also AutoML documentation:
    # https://www.neuraxle.org/stable/api/neuraxle.metaopt.auto_ml.html#neuraxle.metaopt.auto_ml.AutoML
    auto_ml = AutoML(
        pipeline=pipeline,
        hyperparams_optimizer=RandomSearchHyperparameterSelectionStrategy(),
        validation_splitter=ValidationSplitter(test_size=0.20),
        scoring_callback=ScoringCallback(accuracy_score, higher_score_is_better=True),
        n_trials=7,
        epochs=1,
        hyperparams_repository=HyperparamsJSONRepository(cache_folder='cache'),
        refit_trial=True,
        continue_loop_on_error=False)

    # Load the data, and launch the AutoML loop!
    X_train, y_train, X_test, y_test = generate_classification_data()
    auto_ml = auto_ml.fit(X_train, y_train)

    # Get the model from the best trial, and make predictions using predict.
    # See also predict documentation:
    # https://www.neuraxle.org/stable/api/neuraxle.base.html#neuraxle.base.BaseStep.predict
    best_pipeline = auto_ml.get_best_model()
    y_pred = best_pipeline.predict(X_test)

    accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
    print("Test accuracy score:", accuracy)

    shutil.rmtree('cache')
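# Hypothetical helper (an assumption, not shown above): one plausible implementation of
# generate_classification_data() built on scikit-learn's synthetic data utilities, chosen
# so that the generated problem is a small multiclass task compatible with the classifiers
# defined in main().
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

def generate_classification_data():
    # 4 features, 3 classes; n_informative/n_redundant are set so make_classification's
    # constraints hold (n_informative + n_redundant <= n_features).
    data_inputs, expected_outputs = make_classification(
        n_samples=10000, n_features=4, n_informative=3, n_redundant=0,
        n_classes=3, random_state=13)
    X_train, X_test, y_train, y_test = train_test_split(
        data_inputs, expected_outputs, test_size=0.20)
    return X_train, y_train, X_test, y_test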
from neuraxle.base import BaseStep, TruncableSteps, NonFittableMixin, MetaStepMixin
from neuraxle.hyperparams.distributions import LogUniform, Quantized, RandInt, Boolean
from neuraxle.hyperparams.space import HyperparameterSpace, HyperparameterSamples

HYPERPARAMETERS_SPACE = HyperparameterSpace({
    'learning_rate': LogUniform(0.0001, 0.1),
    'l2_weight_reg': LogUniform(0.0001, 0.1),
    'momentum': LogUniform(0.01, 1.0),
    'hidden_size': Quantized(LogUniform(16, 512)),
    'num_layers': RandInt(1, 4),
    'num_lstm_layers': RandInt(1, 2),
    'use_xavier_init': Boolean(),
    'use_max_pool_else_avg_pool': Boolean(),
    'dropout_drop_proba': LogUniform(0.3, 0.7)
})

HYPERPARAMETERS = HyperparameterSamples({
    'learning_rate': 0.1,
    'l2_weight_reg': 0.001,
    'hidden_size': 32,
    'num_layers': 3,
    'num_lstm_layers': 1,
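# A minimal usage sketch (not in the original module): drawing one concrete configuration
# from the space above. HyperparameterSpace.rvs() samples each distribution and returns a
# HyperparameterSamples that can be passed to a step's set_hyperparams().
sampled = HYPERPARAMETERS_SPACE.rvs()
print(sampled['learning_rate'])  # a float drawn from LogUniform(0.0001, 0.1)
print(sampled['hidden_size'])    # rounded to an integer, since the LogUniform draw is Quantized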
def main(tmpdir):
    boston = load_boston()
    X, y = shuffle(boston.data, boston.target, random_state=13)
    X = X.astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, shuffle=False)

    # Note that the hyperparameter spaces are defined here, during the pipeline definition. They could also be set
    # within the classes at their definition if using custom classes, or defined after declaring the pipeline
    # using a flat dict or a nested dict.
    p = Pipeline([
        AddFeatures([
            SKLearnWrapper(
                PCA(n_components=2),
                HyperparameterSpace({"n_components": RandInt(1, 3)})
            ),
            SKLearnWrapper(
                FastICA(n_components=2),
                HyperparameterSpace({"n_components": RandInt(1, 3)})
            ),
        ]),
        ModelStacking([
            SKLearnWrapper(
                GradientBoostingRegressor(),
                HyperparameterSpace({
                    "n_estimators": RandInt(50, 300),
                    "max_depth": RandInt(1, 4),
                    "learning_rate": LogUniform(0.07, 0.7)
                })
            ),
            SKLearnWrapper(
                KMeans(),
                HyperparameterSpace({"n_clusters": RandInt(5, 10)})
            ),
        ],
            joiner=NumpyTranspose(),
            judge=SKLearnWrapper(
                Ridge(),
                HyperparameterSpace({"alpha": LogUniform(0.7, 1.4), "fit_intercept": Boolean()})
            ),
        )
    ])

    print("Meta-fitting on train:")
    auto_ml = AutoML(
        p,
        validation_splitter=ValidationSplitter(0.20),
        refit_trial=True,
        n_trials=10,
        epochs=1,  # 1 epoch here due to using scikit-learn models that just fit once.
        cache_folder_when_no_handle=str(tmpdir),
        scoring_callback=ScoringCallback(mean_squared_error, higher_score_is_better=False),
        callbacks=[MetricCallback('mse', metric_function=mean_squared_error, higher_score_is_better=False)],
        hyperparams_repository=InMemoryHyperparamsRepository(cache_folder=str(tmpdir))
    )

    random_search = auto_ml.fit(X_train, y_train)
    p = random_search.get_best_model()

    print("")
    print("Transforming train and test:")
    y_train_predicted = p.predict(X_train)
    y_test_predicted = p.predict(X_test)

    print("")
    print("Evaluating transformed train:")
    score_transform = r2_score(y_train_predicted, y_train)
    print('R2 regression score:', score_transform)

    print("")
    print("Evaluating transformed test:")
    score_test = r2_score(y_test_predicted, y_test)
    print('R2 regression score:', score_test)
def test_deep_learning_pipeline():
    # Given
    boston = load_boston()
    data_inputs, expected_outputs = shuffle(boston.data, boston.target, random_state=13)
    expected_outputs = expected_outputs.astype(np.float32)
    data_inputs = data_inputs.astype(np.float32)

    pipeline = Pipeline([
        AddFeatures([
            SKLearnWrapper(
                PCA(n_components=2),
                HyperparameterSpace({"n_components": RandInt(1, 3)})),
            SKLearnWrapper(
                FastICA(n_components=2),
                HyperparameterSpace({"n_components": RandInt(1, 3)})),
        ]),
        ModelStacking(
            [
                SKLearnWrapper(
                    GradientBoostingRegressor(),
                    HyperparameterSpace({
                        "n_estimators": RandInt(50, 600),
                        "max_depth": RandInt(1, 10),
                        "learning_rate": LogUniform(0.07, 0.7)
                    })),
                SKLearnWrapper(
                    KMeans(n_clusters=7),
                    HyperparameterSpace({"n_clusters": RandInt(5, 10)})),
            ],
            joiner=NumpyTranspose(),
            judge=SKLearnWrapper(
                Ridge(),
                HyperparameterSpace({
                    "alpha": LogUniform(0.7, 1.4),
                    "fit_intercept": Boolean()
                })),
        )
    ])

    p = DeepLearningPipeline(
        pipeline,
        validation_size=VALIDATION_SIZE,
        batch_size=BATCH_SIZE,
        batch_metrics={'mse': to_numpy_metric_wrapper(mean_squared_error)},
        shuffle_in_each_epoch_at_train=True,
        n_epochs=N_EPOCHS,
        epochs_metrics={'mse': to_numpy_metric_wrapper(mean_squared_error)},
        scoring_function=to_numpy_metric_wrapper(mean_squared_error),
    )

    # When
    p, outputs = p.fit_transform(data_inputs, expected_outputs)

    # Then
    batch_mse_train = p.get_batch_metric_train('mse')
    epoch_mse_train = p.get_epoch_metric_train('mse')
    batch_mse_validation = p.get_batch_metric_validation('mse')
    epoch_mse_validation = p.get_epoch_metric_validation('mse')

    assert len(epoch_mse_train) == N_EPOCHS
    assert len(epoch_mse_validation) == N_EPOCHS

    expected_len_batch_mse_train = math.ceil(
        (len(data_inputs) / BATCH_SIZE) * (1 - VALIDATION_SIZE)) * N_EPOCHS
    expected_len_batch_mse_validation = math.ceil(
        (len(data_inputs) / BATCH_SIZE) * VALIDATION_SIZE) * N_EPOCHS

    assert len(batch_mse_train) == expected_len_batch_mse_train
    assert len(batch_mse_validation) == expected_len_batch_mse_validation

    last_batch_mse_validation = batch_mse_validation[-1]
    last_batch_mse_train = batch_mse_train[-1]
    last_epoch_mse_train = epoch_mse_train[-1]
    last_epoch_mse_validation = epoch_mse_validation[-1]

    assert last_batch_mse_train < last_batch_mse_validation
    assert last_epoch_mse_train < last_epoch_mse_validation
    assert last_batch_mse_train < 1
    assert last_epoch_mse_train < 1
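# Hypothetical helper (an assumption, not shown in this test): to_numpy_metric_wrapper
# could simply coerce the pipeline's data containers to numpy arrays before calling the
# wrapped scikit-learn metric, following the (predictions, expected_outputs) order used
# by the metric dicts above.
import numpy as np

def to_numpy_metric_wrapper(metric_function):
    def metric(data_inputs, expected_outputs):
        return metric_function(np.array(data_inputs), np.array(expected_outputs))
    return metric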
            'add': Choice(choice_list=[0, 1.5, 2, 3.5, 4, 5, 6]),
        }))
    ])),
    (3.5, Pipeline([
        FitTransformCallbackStep().set_name('callback'),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': Quantized(hd=Uniform(0, 10)),
        })),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': Quantized(hd=Uniform(0, 10)),
        }))
    ])),
    (3.5, Pipeline([
        FitTransformCallbackStep().set_name('callback'),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': LogUniform(min_included=1.0, max_included=5.0),
        })),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': LogUniform(min_included=1.0, max_included=5.0),
        }))
    ])),
    (3.5, Pipeline([
        FitTransformCallbackStep().set_name('callback'),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': LogNormal(log2_space_mean=1.0, log2_space_std=0.5),
        })),
        AddN(0.).set_hyperparams_space(HyperparameterSpace({
            'add': LogNormal(log2_space_mean=1.0, log2_space_std=0.5),
        }))
    ])),
    (3.5, Pipeline([