Example #1
def ecoc_prediction_single(feature, boosts, ecoc):
    '''
    Predict the class of a single sample via ECOC decoding.

    :param feature: feature vector of the sample to classify
    :param boosts: per-column boosted classifiers, each exposing predict_single()
    :param ecoc: ECOC code matrix used for predicting, one codeword (row) per class
    :return: index of the codeword closest (in Hamming distance) to the predicted code
    '''
    min_hamming_dist = 1.
    match_label = 0
    code = []
    for b in boosts:
        c_pred = b.predict_single(feature)

        code.append(1 if c_pred == 1 else 0)  # replace -1 with 0
    for ind, c in enumerate(ecoc):
        cur_hd = hamming(c, code)
        if cur_hd < min_hamming_dist:
            min_hamming_dist = cur_hd
            match_label = ind
    return match_label
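
# A minimal, self-contained sketch of the decoding step above. The code matrix and
# predicted bits are made up for illustration; scipy's hamming() returns the
# fraction of differing positions, so every distance lies in [0, 1].
import numpy as np
from scipy.spatial.distance import hamming

demo_ecoc = np.array([[1, 0, 1, 0, 1, 0],   # hypothetical codeword for class 0
                      [0, 1, 0, 1, 0, 1],   # class 1
                      [1, 1, 0, 0, 1, 1]])  # class 2
demo_code = [1, 0, 1, 0, 0, 0]              # bits produced by the per-column boosters
distances = [hamming(row, demo_code) for row in demo_ecoc]
print(int(np.argmin(distances)))            # -> 0, index of the closest codeword
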
    def k_folding_adaboost_parameter_tuning(self, k, tuning_param):
        total_fold_size = len(self.total_fold)
        div = total_fold_size % k  # leftover samples when the folds don't divide evenly
        part = total_fold_size // k
        accuracy_list = []
        for i in range(0, k):
            validation_fold_dataset, validation_fold_label, train_fold_dataset, train_fold_label = self.calculate5folding(
                i, self.total_fold, self.label, part)
            boosting = Boosting.Boosting(tuning_param, train_fold_dataset,
                                         train_fold_label,
                                         validation_fold_dataset,
                                         validation_fold_label,
                                         len(train_fold_dataset))
            boosting.fit()
            accuracy = boosting.predict()
            print("Model accuracy", accuracy)
            accuracy_list.append(accuracy)
        return np.mean(np.asarray(accuracy_list)), np.std(
            np.asarray(accuracy_list))
    def repeatTheLearningProcess(self, bestGridSearched, set):
        _, bestParams = self.getBestGridSearchedModel(bestGridSearched, set)

        if bestGridSearched.learnerType == 'KNN':
            bestGridSearched = KNN.KNNLearner(**bestParams,
                                              datasetNo=set.datasetNo)
        elif bestGridSearched.learnerType == 'DT':
            bestGridSearched = DT.DTLearner(**bestParams,
                                            datasetNo=set.datasetNo)
        elif bestGridSearched.learnerType == 'SVM':
            bestGridSearched = SVM.SVMLearner(**bestParams,
                                              datasetNo=set.datasetNo)
        elif bestGridSearched.learnerType == 'Boosting':
            bestGridSearched = Boosting.BoostingLearner(
                **bestParams, datasetNo=set.datasetNo)
        elif bestGridSearched.learnerType == 'ANN':
            bestGridSearched = ANN.ANNLearner(**bestParams,
                                              datasetNo=set.datasetNo)

        self.getLearningCurve(bestGridSearched, set)
        self.getComplexityCurve(bestGridSearched, set)

        return bestGridSearched
Example #4
import numpy as np
from skimage.transform import integral_image
import cv2
from FeatureHaar import *
from Boosting import *

video = cv2.VideoCapture('OpenCV-Trackers/assets/chaplin.mp4')

ret, frame = video.read()
roi = cv2.selectROI(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # OpenCV delivers BGR frames
roi_ls = [roi[0], roi[1], roi[0] + roi[2], roi[1] + roi[3]]  # [x1, y1, x2, y2]
tracker = Boosting(frame, roi_ls, 150, 12500, 50, 2)
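# Setup stages, as suggested by the method names (see the Boosting class for the
# actual details): crop a search region around the ROI, build its integral image,
# generate the Haar-feature pool, initialise the selector pool, train the weak
# classifiers, and combine the selected ones into the strong classifier.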
tracker.get_search_region()
tracker.set_ii_searchregion()
tracker.build_features()
tracker.init_selector_pool()
tracker.train_weak_classifier()
tracker.get_strong_classifier()

while video.isOpened():
    ret, frame = video.read()
    if not ret:  # end of the video stream
        break

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    tracker.update_frame(frame)
    tracker.get_confidence_map()
    new_roi = tracker.get_meanshift_bbox()
    tracker.update_roi(new_roi)
    tracker.get_search_region()
    cv2.rectangle(frame, (new_roi[0], new_roi[1]), (new_roi[2], new_roi[3]),
                  (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # imshow needs waitKey to refresh
        break

video.release()
cv2.destroyAllWindows()
Example #5
n_param = tuning_parameter()
'''bagging=Bagging.Bagging(20)
bagging.BaggingClassifier(training_dataset,label_train,test_dataset,label_test,14000)
#print("final accuracy :",accuracy)

bagging=Bagging.Bagging(50)
bagging.BaggingClassifier(training_dataset,label_train,test_dataset,label_test,14000)

bagging=Bagging.Bagging(100)
bagging.BaggingClassifier(training_dataset,label_train,test_dataset,label_test,14000)

bagging=Bagging.Bagging(200)
bagging.BaggingClassifier(training_dataset,label_train,test_dataset,label_test,14000)'''
#n_param=100

#bagging=Bagging.Bagging(n_param)
#bagging.BaggingClassifier(training_dataset,label_train,training_dataset,label_train,14000)

boosting = Boosting.Boosting(n_param, training_dataset, label_train,
                             test_dataset, label_test, 14000)
boosting.fit()
accuracy = boosting.predict()
print("=====================================")
print("final accuracy :", accuracy)
'''accuracy=Classify.Gaussian_classify_NB(training_dataset,label_train,test_dataset,label_test)
print("======================================")
print("actual naive bayes accuracy :",accuracy)'''
'''clf=AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_leaf_nodes=5,max_depth=2), n_estimators=150)
clf.fit(training_dataset,label_train)
print("original adaboost",clf.score(test_dataset,label_test))'''
__author__ = 'kesavsundar'
import sys

sys.path.append('/home/kesavsundar/Dropbox/CS6140_K_Gopal/General_Modules')

import Boosting
import tenfold

if __name__ == '__main__':
    # Assumes the Boosting module defines a Boosting class; the bare module
    # itself is not callable.
    ada_boost = Boosting.Boosting()
    ada_boost.init_boosting_model()
    tf = tenfold.Tenfold(ada_boost.xy_data, ada_boost)
    tf.inbuilt_tenfold_train()
Example #7
import cv2
from Boosting import *

# img_ii = integral_image(img)
# print(img_ii)
# roi = [1,1,4,4]
# bo = Boosting(img,roi,10,30,6,2)
# bo.get_search_region()
# bo.set_ii_searchregion()
# bo.build_features()
# bo.init_selector_pool()
# bo.train_weak_classifier()

img_org = cv2.imread("./assets/edge-detection.png")
roi = cv2.selectROI(img_org)
print(roi)
img = cv2.cvtColor(img_org, cv2.COLOR_BGR2GRAY)  # imread returns BGR
roi_image = img[roi[1]:roi[1] + roi[3], roi[0]:roi[0] + roi[2]]
cv2.imshow("roiImage", roi_image)
roi_ls = [roi[0], roi[1], roi[0] + roi[2], roi[1] + roi[3]]
bo = Boosting(img, roi_ls, 150, 12500, 50, 2)
bo.get_search_region()
bo.set_ii_searchregion()
bo.build_features()
bo.init_selector_pool()
bo.train_weak_classifier()
bo.get_strong_classifier()
bo.get_confidence_map()
new_roi = bo.get_meanshift_bbox()
#new_roi = bo.get_bbox()
cv2.rectangle(img_org, (new_roi[0], new_roi[1]), (new_roi[2], new_roi[3]),
              (0, 255, 0), 2)
cv2.imshow('newroi', img_org)
cv2.waitKey(0)
Example #8
import numpy as np
from skimage.transform import integral_image
from Boosting import *

# A small 6x6 test image for exercising the feature/boosting pipeline.
img = np.array([[1, 2, 3, 4, 15, 6], [2, 4, 6, 8, 1, 2], [3, 6, 19, 2, 5, 1],
                [6, 3, 13, 6, 0, 4], [0, 11, 15, 21, 2, 3],
                [21, 1, 18, 14, 3, 16]])
# print('IMAGE:')
# print(img)

print('IMAGE DIMENSIONS')
print('{} * {}'.format(img.shape[0], img.shape[1]))

img_ii = integral_image(img)
print(img_ii)
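
# The integral image turns any rectangle sum into four lookups, which is what the
# Haar features below rely on. A quick sanity check of that identity on this test
# image (rows/cols 1..3 chosen arbitrarily for illustration):
r0, c0, r1, c1 = 1, 1, 3, 3
rect_sum = (img_ii[r1, c1] - img_ii[r0 - 1, c1]
            - img_ii[r1, c0 - 1] + img_ii[r0 - 1, c0 - 1])
assert rect_sum == img[r0:r1 + 1, c0:c1 + 1].sum()
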
roi = [1, 1, 4, 4]
bo = Boosting(img, roi, 10, 30, 6, 2)
bo.get_search_region()
bo.set_ii_searchregion()
bo.build_features()
bo.init_selector_pool()
bo.train_weak_classifier()

# img = cv2.imread("./assets/edge-detection.png")
# roi = cv2.selectROI(img)
# img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
# roi_image = img[roi[1]:roi[1]+roi[3],roi[0]:roi[0]+roi[2]]
# cv2.imshow("roiImage",roi_image)
# roi_ls = [roi[0],roi[1],roi[0]+roi[2],roi[1]+roi[3]]
# bo = Boosting(img,roi_ls,10,12500,50,2)
# bo.get_search_region()
# bo.set_ii_searchregion()
Example #9
mySLFN.set_initDistrib(initDistrib)  # Set the initialization
mySLFN.set_trainigAlg(trainingAlg)  # Set the training algorithm

mySLFN.set_stopCrit(stopCriterion)  # Set the Stop Criterion
mySLFN.set_regularization(regularization)  # Set regularization

mySLFN.set_visual([visual])  # Visualization of output

#################################################################
########################## Boosting #########################
#################################################################

myBust = Boosting.CBoosting(T=T,
                            alg="RealAdaBoost",
                            CV=CV,
                            Nruns=Nruns,
                            InitRandomSeed=InitRandomSeed)
# alg options: "GentleBoost" or "RealAdaBoost"

myBust.set_Train(Xtrain, Ytrain)
myBust.set_Val(Xtest, Ytest)
myBust.set_Test(Xtest, Ytest)

myBust.set_Classifier(mySLFN)

myBust.set_stopCrit(sCBust)

myBust.train()

myBust.output_stuff(
Example #10
    train_times.append(train_time)
    test_times.append(query_time)
    train, test, train_time, query_time = svm1.main('rbf', .05, 1, train_x,
                                                    test_x, train_y, test_y)
    train_scores.append(train)
    test_scores.append(test)
    train_times.append(train_time)
    test_times.append(query_time)
    train, test, train_time, query_time = nn1.main(4, (20, ), .005, 30,
                                                   train_x, test_x, train_y,
                                                   test_y)
    train_scores.append(train)
    test_scores.append(test)
    train_times.append(train_time)
    test_times.append(query_time)
    train, test, train_time, query_time = b1.main(1, 1, 50, train_x, test_x,
                                                  train_y, test_y)
    train_scores.append(train)
    test_scores.append(test)
    train_times.append(train_time)
    test_times.append(query_time)

    # Bar graph plotting from https://www.tutorialspoint.com/matplotlib/matplotlib_bar_plot.htm
    # https://pythonspot.com/matplotlib-bar-chart/
    plt.figure(figsize=(9, 5))
    plt.ylim(.84, .94)
    plt.bar(labels, train_scores)
    plt.title("Training Scores")
    plt.xlabel("Classifiers")
    plt.ylabel("Accuracy Scores")
    plt.savefig('Report Training Scores BD.png')
    plt.close()