Example #1
# Assumed imports; the aliases (adaBoost, rf, knn) match the names used below.
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier as adaBoost
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier as rf
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier as knn


def chooseClassification(name):
    print("Chosen classifier:", name)
    return {
        'NB': GaussianNB(),
        'ADA': adaBoost(n_estimators=2),
        'RF': rf(n_estimators=7),
        'KNN': knn(n_neighbors=15, p=1),
        'SVM': svm.SVC(C=0.01, kernel='rbf', probability=True),
        'BAG': BaggingClassifier(n_estimators=7),
        # BaggingClassifier options left disabled in the source:
        # base_estimator=knn(), bootstrap=True, bootstrap_features=True,
        # oob_score=True, max_features=10, max_samples=100
    }.get(name, GaussianNB())  # default: Gaussian Naive Bayes
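A minimal usage sketch, assuming the function and imports above; make_classification only supplies toy data, and the key 'RF' is an arbitrary choice:

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, random_state=0)  # toy data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = chooseClassification('RF')  # unknown keys fall back to GaussianNB
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))

Note that the dict builds every classifier eagerly before .get() picks one; that is cheap here because none of them is fitted yet.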
Example #2
# (Imports and aliases as in Example #1.)
def chooseClassification(name):
    print("Chosen classifier:", name)
    return {
        'NB': GaussianNB(),
        'ADA': adaBoost(n_estimators=50),
        'RF': rf(n_estimators=100),
        'KNN': knn(n_neighbors=15, p=1),
        'SVM': svm.SVC(kernel='rbf', probability=True),
        'BAG': BaggingClassifier(n_estimators=30),
        # BaggingClassifier options left disabled in the source:
        # base_estimator=knn(), bootstrap=True, bootstrap_features=True,
        # oob_score=True, max_features=10, max_samples=100
    }.get(name, GaussianNB())  # default: Gaussian Naive Bayes
Example #3
# Assumed imports for this snippet; aliases (adaBoost, dtclf) match the names used below.
from sklearn import metrics
from sklearn.ensemble import AdaBoostClassifier as adaBoost
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score
from sklearn.model_selection import (GridSearchCV, StratifiedShuffleSplit,
                                     train_test_split)
from sklearn.tree import DecisionTreeClassifier as dtclf

# The split call was truncated in the source; a plain train/test split over
# features X and labels y (names assumed) keeps the original random_state:
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    random_state=0)

print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)

#############################################
# ADABOOST
#############################################

# Cross-validation and grid search for hyperparameter estimation
param_dist = {'algorithm': ["SAMME", "SAMME.R"]}

cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
clf = GridSearchCV(adaBoost(), param_grid=param_dist, cv=cv)
clf = clf.fit(X_train, y_train.values.ravel())

print("Best estimator found by grid search:")
print(clf.best_estimator_)

# Apply the classifier to the test data and report the model's accuracy
print('The accuracy of AdaBoost is:')
print(clf.score(X_test, y_test.values.ravel()))

prediction = clf.predict(X_test)
# Use metrics.confusion_matrix and metrics.classification_report to summarize results.
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, prediction))
print("Classification report:\n %s\n" %
      metrics.classification_report(y_test, prediction))
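The grid above tunes only the boosting algorithm; a hedged sketch of a wider search over AdaBoost's n_estimators and learning_rate, reusing the same cv splitter (the grid values and the clf_wide name are illustrative, not from the source):

wide_param_grid = {
    'algorithm': ["SAMME", "SAMME.R"],
    'n_estimators': [50, 100, 200],    # illustrative values
    'learning_rate': [0.1, 0.5, 1.0],  # illustrative values
}
clf_wide = GridSearchCV(adaBoost(), param_grid=wide_param_grid, cv=cv)
clf_wide.fit(X_train, y_train.values.ravel())
print(clf_wide.best_params_)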
# The source called an undefined name `model` here; `clf` is the fitted grid search.
y_pred = clf.predict(X_test)
confusion_matrix(y_test, y_pred)

# In[11]:

accuracy_score(y_test, y_pred)

# In[12]:

f1_score(y_test, y_pred, average='macro')

# ## AdaBoost with a Decision Tree as base estimator (an ensemble approach)

# In[13]:

# Note: scikit-learn >= 1.2 renames `base_estimator` to `estimator`.
Model = adaBoost(n_estimators=100, base_estimator=dtclf(), learning_rate=0.98)
Model.fit(X_train, y_train)
y_pred = Model.predict(X_test)  # the source's lowercase `model` here was a typo

# In[14]:

confusion_matrix(y_test, y_pred)

# In[15]:

accuracy_score(y_test, y_pred)

# In[16]:

f1_score(y_test, y_pred, average='macro')
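
The three metric cells above repeat for every model; a small helper (the name `report` is ours, not the notebook's) bundles them for reuse:

def report(y_true, y_pred):
    # One call per model instead of three separate cells
    print("Confusion matrix:\n", confusion_matrix(y_true, y_pred))
    print("Accuracy:", accuracy_score(y_true, y_pred))
    print("Macro F1:", f1_score(y_true, y_pred, average='macro'))

report(y_test, y_pred)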