# print mean/std test score for every hyperparameter combination tried
for r, _ in enumerate(grid.cv_results_['mean_test_score']):
    print("%.3f+/-%.2f %r"
          % (grid.cv_results_['mean_test_score'][r],
             grid.cv_results_['std_test_score'][r],
             grid.cv_results_['params'][r]))

print('Best parameters: %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)

# stacking: combine the tuned classifiers via a meta-classifier
from sklearn.ensemble import StackingClassifier

estimators = [('dt', clf2), ('kn', pip3)]
clf4 = StackingClassifier(estimators=estimators, final_estimator=pip1)
clf4.fit(X_train, y_train).score(X_test, y_test)
clf4.get_params()

# bagging
import pandas as pd

df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
                      'machine-learning-databases/wine/wine.data',
                      header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
                   'Alcalinity of ash', 'Magnesium', 'Total phenols',
                   'Flavanoids', 'Nonflavanoid phenols',
                   'Proanthocyanins', 'Color intensity', 'Hue',
                   'OD280/OD315 of diluted wines', 'Proline']

# drop one class to reduce the task to a binary problem
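# A minimal sketch of the class-dropping step and the bagging fit it sets
# up. Assumptions not in the original: class 1 is the one dropped (keeping
# classes 2 and 3), the two illustrative features, and the split/estimator
# parameters; it also reuses the X_train/y_train names from above.
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier

df_wine = df_wine[df_wine['Class label'] != 1]   # keep classes 2 and 3
y = LabelEncoder().fit_transform(df_wine['Class label'].values)
X = df_wine[['Alcohol', 'OD280/OD315 of diluted wines']].values
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1, stratify=y)

# bag 500 unpruned decision trees, each fit on a bootstrap sample
tree = DecisionTreeClassifier(criterion='entropy', max_depth=None,
                              random_state=1)
bag = BaggingClassifier(tree, n_estimators=500, max_samples=1.0,
                        max_features=1.0, bootstrap=True,
                        bootstrap_features=False, n_jobs=-1,
                        random_state=1)
bag.fit(X_train, y_train)
print('Bagging test accuracy: %.3f' % bag.score(X_test, y_test))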
# base estimators for the stack (rfc is RandomForestClassifier)
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier as rfc
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression

estimators = [
    ('naive-bayes', GaussianNB()),
    ('random-forest', rfc(n_estimators=100, random_state=0)),
    ('mlp', MLPClassifier(activation='relu', alpha=0.05, random_state=0))
]

# setting up the meta-classifier
clf = StackingClassifier(
    estimators=estimators,
    final_estimator=LogisticRegression(random_state=0)
)

# fitting the model
clf.fit(x_train, y_train)

# getting info about the hyperparameters
clf.get_params()
'''
{'cv': None,
 'estimators': [('naive-bayes', GaussianNB(priors=None, var_smoothing=1e-09)),
                ('random-forest',
                 RandomForestClassifier(bootstrap=True, ccp_alpha=0.0,
                                        class_weight=None, criterion='gini',
                                        max_depth=None, max_features='auto',
                                        max_leaf_nodes=None, max_samples=None,
                                        min_impurity_decrease=0.0,
                                        min_impurity_split=None,
                                        min_samples_leaf=1,
                                        min_samples_split=2,
                                        min_weight_fraction_leaf=0.0,
                                        n_estimators=100, n_jobs=None,
                                        oob_score=False, random_state=0,
                                        verbose=0, warm_start=False)),
                ('mlp',
                 MLPClassifier(activation='relu', alpha=0.05,
                               batch_size='auto', beta_1=0.9, ...
'''  # output truncated in the original
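# A quick sanity check on whether stacking actually helps: compare each
# base estimator's cross-validated accuracy against the stacked model.
# This sketch assumes the x_train/y_train from above; cv=5 and the
# accuracy metric are arbitrary choices, not taken from the original.
from sklearn.model_selection import cross_val_score

for name, est in estimators + [('stacking', clf)]:
    scores = cross_val_score(est, x_train, y_train, cv=5,
                             scoring='accuracy')
    print('%.3f +/- %.3f [%s]' % (scores.mean(), scores.std(), name))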