Code example #1
File: ics_bagging.py Project: glemaitre/brew
    def __init__(self,
                 K=10,
                 alpha=0.75,
                 base_classifier=None,
                 n_classifiers=100,
                 combination_rule='majority_vote',
                 diversity_metric='e',
                 positive_label=1):

        self.K = K
        self.alpha = alpha

        self.base_classifier = base_classifier
        self.n_classifiers = n_classifiers
        self.combination_rule = combination_rule
        self.positive_label = positive_label

        self.classifiers = None
        self.ensemble = None
        self.combiner = Combiner(rule=combination_rule)

        self.diversity_metric = diversity_metric
        self.diversity = Diversity(metric=diversity_metric)

        self.validation_X = None
        self.validation_y = None
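A construction sketch for the class above. The class name ICSBagging is only assumed from the file name ics_bagging.py, and DecisionTreeClassifier is an illustrative base learner, not one named by the snippet:

from sklearn.tree import DecisionTreeClassifier

# ICSBagging is an assumed name (from ics_bagging.py); the arguments mirror
# the constructor defaults shown above
model = ICSBagging(K=10, alpha=0.75,
                   base_classifier=DecisionTreeClassifier(),
                   n_classifiers=100,
                   combination_rule='majority_vote',
                   diversity_metric='e')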
Code example #2
    def __init__(self, ensemble=None, selector=None, combiner=None):
        self.ensemble = ensemble
        self.selector = selector

        if combiner is None:
            combiner = Combiner(rule='majority_vote')

        self.combiner = combiner
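A minimal usage sketch of this fallback, relying only on calls shown elsewhere on this page; clf1 and clf2 stand for any scikit-learn-style estimators, and X, y for training data:

pool = Ensemble(classifiers=[clf1, clf2])

# combiner omitted: falls back to majority vote, per the constructor above
model = EnsembleClassifier(ensemble=pool)
model.fit(X, y)
predictions = model.predict(X)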
Code example #3
    def test__arguments(self):

        c = MockClassifier()

        pool = Ensemble(classifiers=[c])
        combiner = Combiner(rule='majority_vote')

        model = EnsembleClassifier(ensemble=pool, combiner=combiner)

        assert model.ensemble is pool
        assert model.combiner is combiner
Code example #4
File: bagging.py Project: glemaitre/brew
    def __init__(self,
                 base_classifier=None,
                 n_classifiers=100,
                 combination_rule='majority_vote'):

        self.base_classifier = base_classifier
        self.n_classifiers = n_classifiers
        self.ensemble = None
        self.combiner = Combiner(rule=combination_rule)
Code example #5
File: random_subspace.py Project: glemaitre/brew
    def __init__(self,
                 base_classifier=None,
                 n_classifiers=100,
                 combination_rule='majority_vote',
                 max_features=0.5):
        self.base_classifier = base_classifier
        self.n_classifiers = n_classifiers
        self.combiner = Combiner(rule=combination_rule)
        self.classifiers = None
        self.ensemble = None
        self.max_features = max_features
Code example #6
    def __init__(self, classifierList, combiningMethod):
        # classifierList holds (name, classifier) tuples; collect the
        # classifiers without shadowing the built-in `tuple`
        classifiers = [clf for _, clf in classifierList]

        hybridEnsemble = Ensemble(classifiers=classifiers)
        hybridEnsembleClassifier = EnsembleClassifier(
            ensemble=hybridEnsemble, combiner=Combiner(combiningMethod))

        super().__init__(hybridEnsembleClassifier)
        self.name = "ensemble"
Code example #7
File: bagging.py Project: glemaitre/brew
    def __init__(self,
                 base_classifier=None,
                 n_classifiers=100,
                 combination_rule='majority_vote'):

        self.base_classifier = base_classifier
        self.n_classifiers = n_classifiers

        # using the sklearn implementation of bagging for now
        self.sk_bagging = BaggingClassifier(base_estimator=base_classifier,
                                            n_estimators=n_classifiers,
                                            max_samples=1.0,
                                            max_features=1.0)

        self.ensemble = Ensemble()
        self.combiner = Combiner(rule=combination_rule)
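For reference, the delegated estimator above corresponds to using scikit-learn's bagging directly. A sketch; note that base_estimator was renamed estimator in scikit-learn 1.2, so newer versions need the new keyword:

from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

# direct scikit-learn equivalent of the wrapped estimator above
bag = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                        n_estimators=100,
                        max_samples=1.0,
                        max_features=1.0)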
Code example #8
import sklearn
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
                             recall_score)

from brew.base import Ensemble, EnsembleClassifier
from brew.combination.combiner import Combiner
# import path assumed from the stacker.py example below
from brew.stacking.stacker import EnsembleStack, EnsembleStackClassifier

import fris_stolp_test
# note: this binds the class itself; an instance is presumably created
# before fitting in the original project
clf4 = fris_stolp_test.SklearnHelper

# Creating the ensemble (EnsembleClassifier expects a Combiner, not a string)
ensemble = Ensemble([clf1, clf2, clf3, clf4])
eclf = EnsembleClassifier(ensemble=ensemble, combiner=Combiner('mean'))

# Creating Stacking
layer_1 = Ensemble([clf1, clf2, clf3])
layer_2 = Ensemble([sklearn.clone(clf1)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack, combiner=Combiner('mean'))

sclf.fit(X_train.values, y_train.values)

y_pre = sclf.predict(X_test.values)

precision = precision_score(y_test, y_pre)
recall = recall_score(y_test, y_pre)
accuracy = accuracy_score(y_test, y_pre)
fmera = f1_score(y_test, y_pre)

if __name__ == '__main__':
    print("precision ", precision, " recall ", recall, " f1 ", fmera,
          " accuracy ", accuracy)
Code example #9
File: stacker.py Project: gitter-badger/brew
    def __init__(self, stack, combiner=None):
        self.stack = stack
        self.combiner = combiner
        if combiner is None:
            self.combiner = Combiner(rule='majority_vote')
Code example #10
File: test_combiner.py Project: anu19s/myshop
    def test_median(self):
        comb = Combiner(rule='median')
        assert comb.rule == median_rule
Code example #11
File: test_combiner.py Project: anu19s/myshop
    def test_min(self):
        comb = Combiner(rule='min')
        assert comb.rule == min_rule
Code example #12
File: test_combiner.py Project: anu19s/myshop
    def test_max(self):
        comb = Combiner(rule='max')
        assert comb.rule == max_rule
Code example #13
File: test_combiner.py Project: anu19s/myshop
    def test_majority_vote(self):
        comb = Combiner(rule='majority_vote')
        assert comb.rule == majority_vote_rule
Code example #14
File: test_combiner.py Project: anu19s/myshop
    def test_default_rule(self):
        comb = Combiner()
        assert comb.rule == majority_vote_rule
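Taken together, the five tests above pin down the mapping from rule strings to rule functions. A summary sketch; the 'mean' rule is inferred from usage examples elsewhere on this page rather than from a test shown here:

# rule-string -> rule-function mapping implied by the tests above
RULES = {
    'majority_vote': majority_vote_rule,  # also the default
    'max': max_rule,
    'min': min_rule,
    'median': median_rule,
}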
Code example #15
File: subbb.py Project: SafwanMahmood/ML-algorithms
from sklearn.ensemble import (AdaBoostClassifier, BaggingClassifier,
                              ExtraTreesClassifier)
from sklearn.tree import DecisionTreeClassifier

from brew.base import Ensemble, EnsembleClassifier
from brew.combination.combiner import Combiner

classifiers = [
    # first entry reconstructed from the commented-out variant below; the
    # opening of this list is truncated in the original snippet
    BaggingClassifier(DecisionTreeClassifier(criterion='entropy',
                                             max_depth=100),
                      max_samples=1.0,
                      max_features=1.0),
    AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),
                       n_estimators=600,
                       learning_rate=1),
    BaggingClassifier(ExtraTreesClassifier(criterion='entropy',
                                           max_depth=100,
                                           n_estimators=100),
                      max_samples=1.0,
                      max_features=1.0)
]
clfs = classifiers  # [clf1, clf2]
ens = Ensemble(classifiers=clfs)

# create your Combiner
# the rules can be 'majority_vote', 'max', 'min', 'mean' or 'median'
comb = Combiner(rule='max')

# now create your ensemble classifier
ensemble_clf = EnsembleClassifier(ensemble=ens, combiner=comb)
ensemble_clf = ensemble_clf.fit(X_train, Y_train)
y_tested = ensemble_clf.predict(X_test)

# for i in xrange(1,10):
#     clf = BaggingClassifier(DecisionTreeClassifier(criterion = 'entropy', max_depth = i + 100),max_samples=1.0, max_features=1.0)
#     clf = clf.fit(X_train, Y_train)
#     y_tested1 = clf.predict(X_test)
#     for a in range(len(y_tested)):
#         y_tested[a] = (y_tested[a] & y_tested1[a])
#     clf = BaggingClassifier(ExtraTreesClassifier(criterion = 'entropy', max_depth = i + 100,n_estimators=100+i),max_samples=1.0, max_features=1.0)
#     clf = clf.fit(X_train, Y_train)
#     y_tested2 = clf.predict(X_test)
Code example #16
# creating a new ensemble of ensembles
ens = Ensemble(classifiers=[clf1, ensemble_clf])
ensemble_ens = EnsembleClassifier(ensemble=ens, combiner=cmb)
 
# and you can use it in the same way as a regular ensemble
ensemble_ens.fit(X, y)
ensemble_ens.predict(X)
ensemble_ens.predict_proba(X)




# the other library

# create your Ensemble; a member can itself be an EnsembleClassifier object
ens = Ensemble(classifiers=[mode_9, mode_9])

# create your Combiner (combination rule)
# it can be 'min', 'max', 'majority_vote' ...
cmb = Combiner(rule='mean')

# and now, create your Ensemble Classifier
ensemble_clf = EnsembleClassifier(ensemble=ens, combiner=cmb)

# assuming you have X, y data, you can use
ensemble_clf.fit(val_path, val_path)

print("-----------d-----------")
ensemble_clf.predict(val_path)
Code example #17
import numpy as np
from numpy import genfromtxt
import sklearn

my_data = genfromtxt('/Users/samarth/Desktop/data.csv', delimiter=',')

for item in range(0, my_data.shape[0]):
    var = my_data[item][4]
    my_data[item][4] = int(range_scaler(5538, 600000, 100, 1000, var))
'''
if my_data[item][6] < 100 or my_data[item][6] > 1000 or (my_data[item][6] > my_data[item][4]):
    my_data = np.delete(my_data, (item), axis=0)
'''
# keep rows whose column 4 lies within [100, 1000]
my_data = my_data[np.logical_not(
    np.logical_or(my_data[:, 4] < 100, my_data[:, 4] > 1000))]
my_data = my_data[np.logical_not(my_data[:, 4] > my_data[:, 6])]
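The same two filters can be collapsed into one boolean mask; an equivalent sketch:

# single-mask equivalent of the two filters above
mask = ((my_data[:, 4] >= 100) & (my_data[:, 4] <= 1000)
        & (my_data[:, 4] <= my_data[:, 6]))
my_data = my_data[mask]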

ensemble = Ensemble([clf1, clf2, clf3])
eclf = EnsembleClassifier(ensemble=ensemble, combiner=Combiner('mean'))

layer_1 = Ensemble([clf1, clf2, clf3])
layer_2 = Ensemble([sklearn.clone(clf1)])

stack = EnsembleStack(cv=3)

stack.add_layer(layer_1)
stack.add_layer(layer_2)

sclf = EnsembleStackClassifier(stack)

clf_list = [clf1, clf2, clf3, eclf, sclf]
lbl_list = [
    'Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble',
    'Stacking'
]
Code example #18
# Create plot
plt.title("Learning Curve")
plt.xlabel("Training Set Size")
plt.ylabel("Accuracy Score")
plt.legend(loc="best")
plt.tight_layout()
plt.show()


# Merge two classifiers: random forest and KNN
from brew.base import Ensemble
from brew.base import EnsembleClassifier
from brew.combination.combiner import Combiner
# random undersampling to balance the classes
# (fit_sample was renamed fit_resample in newer imbalanced-learn releases)
X_resampled, y_resampled = RandomUnderSampler(random_state=0).fit_sample(X_train, y_train)
clfs = [classifier_rf, classifier_knn]
ens = Ensemble(classifiers=clfs)
comb = Combiner(rule='mean')
eclf = EnsembleClassifier(ensemble=ens, combiner=comb)
eclf.fit(X_resampled, y_resampled)
y_pred = eclf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred))


# PCA feature reduction
# first check how many components are needed to capture most of the variance
from sklearn.decomposition import PCA
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)  # transform only; never refit on test data
explained_variance = pca.explained_variance_ratio_
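A common follow-up sketch for picking n_components from the ratios above; the 95% threshold here is an assumption, not something the original snippet specifies:

import numpy as np

# smallest component count whose cumulative explained variance reaches
# the (assumed) 95% threshold, then refit with that many components
n_components = int(np.argmax(np.cumsum(explained_variance) >= 0.95)) + 1
pca = PCA(n_components=n_components)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)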