Code Example #1
# keep only rows whose column 4 lies within [100, 1000]
my_data = my_data[np.logical_not(
    np.logical_or(my_data[:, 4] < 100, my_data[:, 4] > 1000))]
# drop rows where column 4 exceeds column 6
my_data = my_data[np.logical_not(my_data[:, 4] > my_data[:, 6])]

ensemble = Ensemble([clf1, clf2, clf3])
eclf = EnsembleClassifier(ensemble=ensemble, combiner=Combiner('mean'))

layer_1 = Ensemble([clf1, clf2, clf3])
layer_2 = Ensemble([sklearn.clone(clf1)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack)

clf_list = [clf1, clf2, clf3, eclf, sclf]
lbl_list = [
    'Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble',
    'Stacking'
]
X = my_data[:, :6]
y = my_data[:, 6]

# brew requires class labels from 0 to N, no skipping allowed
d = {yi: i for i, yi in enumerate(set(y))}
y = np.array([d[yi] for yi in y])

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.33)
Code Example #2
# Initializing Classifiers
clf1 = LogisticRegression(random_state=0)
clf2 = RandomForestClassifier(random_state=0)
clf3 = SVC(random_state=0, probability=True)

# Creating Ensemble
ensemble = Ensemble([clf1, clf2, clf3])
eclf = EnsembleClassifier(ensemble=ensemble, combiner=Combiner('mean'))

# Creating Stacking
layer_1 = Ensemble([clf1, clf2, clf3])
layer_2 = Ensemble([sklearn.clone(clf1)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack)

clf_list = [clf1, clf2, clf3, eclf, sclf]
lbl_list = [
    'Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble',
    'Stacking'
]

# Loading some example data
X, y = iris_data()
X = X[:, [0, 2]]
Code Example #3
import fris_stolp_test
clf4 = fris_stolp_test.SklearnHelper  # sklearn-style wrapper class from the local fris_stolp_test module

# Creating Ensemble
ensemble = Ensemble([clf1, clf2, clf3, clf4])
eclf = EnsembleClassifier(ensemble=ensemble, combiner=Combiner('mean'))

# Creating Stacking
layer_1 = Ensemble([clf1, clf2, clf3])
layer_2 = Ensemble([sklearn.clone(clf1)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack, combiner=Combiner('mean'))

sclf.fit(X_train.values, y_train.values)

y_pre = sclf.predict(X_test.values)

precision = precision_score(y_test, y_pre)
recall = recall_score(y_test, y_pre)
accuracy = accuracy_score(y_test, y_pre)
fmera = f1_score(y_test, y_pre)

if __name__ == '__main__':
    print("presicion ", precision, " recall ", recall, " fmera ", fmera,
          " accuracy ", accuracy)
Code Example #4
clf3 = SVC(random_state=0, probability=True)

# Creating Ensemble
ensemble = Ensemble([clf1, clf2, clf3])
eclf = EnsembleClassifier(ensemble=ensemble, combiner=Combiner('mean'))

# Creating Stacking
layer_1 = Ensemble([clf1, clf2, clf3])
layer_2 = Ensemble([sklearn.clone(clf1)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack)

clf_list = [clf1, clf2, clf3, eclf, sclf]
lbl_list = ['Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble', 'Stacking']

# Loading some example data
X, y = iris_data()
X = X[:,[0, 2]]

# WARNING, WARNING, WARNING
# brew requires classes from 0 to N, no skipping allowed
d = {yi : i for i, yi in enumerate(set(y))}
y = np.array([d[yi] for yi in y])

import numpy as np
from sklearn.model_selection import train_test_split
Code Example #5
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import itertools

import brew
from brew.base import Ensemble
from brew.combination.combiner import Combiner
from brew.stacking.stacker import EnsembleStack
from brew.stacking.stacker import EnsembleStackClassifier

import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier

layer_1 = [
    SVC(probability=True),
    RandomForestClassifier(n_estimators=100),
    ExtraTreesClassifier(n_estimators=100)
]

layer_2 = [SVC(probability=True), LogisticRegression(max_iter=500)]

stack = EnsembleStack(cv=10)  # number of folds per layer
stack.add_layer(Ensemble(layer_1))
stack.add_layer(Ensemble(layer_2))

clf = EnsembleStackClassifier(stack, Combiner('mean'))
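The snippet above assembles the stacked classifier but stops before training it. As a minimal usage sketch, with a synthetic dataset from scikit-learn's make_classification standing in for real data (an assumption; the fit/predict calls mirror those used in the other examples on this page):

# Minimal usage sketch on assumed synthetic data (not part of the original example).
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X_demo, y_demo = make_classification(n_samples=300, n_features=8, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.3, random_state=0)

clf.fit(X_tr, y_tr)             # fits layer_1, then layer_2 on its cross-validated outputs
print(clf.predict(X_te)[:5])    # class labels produced by the final 'mean' combiner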


Code Example #6
# Stacking model


layer_1 = Ensemble([model1, model2, model3])
layer_2 = Ensemble([sklearn.clone(model3)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack)

sclf.fit(feature_train, label_train)
pred = sclf.predict(feature_test)

# scikit-learn metrics expect (y_true, y_pred) in that order
Accuracy.append(accuracy_score(label_test, pred))
Precision.append(precision_score(label_test, pred))
Recall.append(recall_score(label_test, pred))
f1.append(f1_score(label_test, pred))
false_positive_rate, true_positive_rate, thresholds = roc_curve(label_test, pred)
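The ROC curve values are computed above, but the plotting code in the original snippet is cut off; a minimal sketch of how they might be drawn with matplotlib (an assumed addition, not from the original):

# Assumed plotting sketch for the ROC values computed above.
import matplotlib.pyplot as plt

plt.plot(false_positive_rate, true_positive_rate, label='stacking')
plt.plot([0, 1], [0, 1], linestyle='--', label='chance')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend()
plt.show()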




import matplotlib.pyplot as plt
X = [10, 15, 20, 25]
Code Example #7
# clf3=Bagging(base_classifier=SVC(random_state=0, probability=True))

# Creating Ensemble
ensemble = Ensemble([clf1, clf2, clf3])
eclf = EnsembleClassifier(ensemble=ensemble, combiner=Combiner('mean'))

# Creating Stacking
layer_1 = Ensemble([clf1, clf2, clf3])
layer_2 = Ensemble([sklearn.clone(clf1)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack)

clf_list = [clf1, clf2, clf3, eclf, sclf]
lbl_list = [
    'Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble',
    'Stacking'
]

# Loading some example data
X, y = iris_data()
X = X[:, [0, 2]]
print(type(X))

# WARNING, WARNING, WARNING
# brew requires classes from 0 to N, no skipping allowed
d = {yi: i for i, yi in enumerate(set(y))}
Code Example #8
File: competition.py    Project: naiaden/lama-dsl
#    ###############################
#    print("== stacked xgboost soft")
#    X_xgbs = np.rot90(np.array([gg[1] for gg in local_predictions_proba]))
#    xgbs = xgb.XGBClassifier()
#    xgbs.fit(X_xgbs, y_train)
#    xgbs_score = xgbs.score(X_xgbs, y_test)
#    combined_pipelines['stackxgbsoft']['scores'] = np.concatenate((combined_pipelines['stackxgbsoft']['scores'], [xgbs_score]))

#     print("\tWorking with:")

    brew_ensemble_l1 = Ensemble([mnb_st_pl, fat_pl, sgd_sc_pl])
    brew_ensemble_l2 = Ensemble([sgd_st_pl, mkn_lm])#, knn_st])
    stack = EnsembleStack(cv=3)
    stack.add_layer(brew_ensemble_l1)
    stack.add_layer(brew_ensemble_l2)
    sclf = EnsembleStackClassifier(stack)
    sclf.fit(X_train, y_train)
    sclf_score = sclf.score(X_test, y_test)
    combined_pipelines['ensemble']['scores'] = np.concatenate((combined_pipelines['ensemble']['scores'], [sclf_score]))
    print("sclf_score", sclf_score)

print()
print("= SUMMARY")
best_predictions = []
for pipeline_name in all_pipelines.keys():
    index, value = max(enumerate(all_pipelines[pipeline_name]['scores']), key=operator.itemgetter(1))
    print(pipeline_name, index, value)
    best_predictions.append((pipeline_name, all_pipelines[pipeline_name]['predictions'][index]))
        
for pipeline_name in all_pipelines.keys():
    print(pipeline_name)