Example #1
# (fragment: tail of a loop searching over candidate values of lambda)
    # print(lam, scores)
    acc = np.mean(scores)  # mean cross-validation accuracy for this lambda
    if not best_results or best_results['score'] < acc:
        best_results = {'lam': lam, 'score': acc}  # keep the best lambda found so far

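# A minimal sketch of the search loop the fragment above appears to be excerpted from.
# The lambda grid and the MKLpy cross-validation helper (and its exact signature) are
# assumptions, not part of the original snippet, so the sketch is left commented out:
# from MKLpy.model_selection import cross_val_score
# best_results = {}
# for lam in [0, 0.01, 0.1, 0.2, 0.5, 0.9, 1]:
#     mkl = EasyMKL(learner=base_learner, lam=lam)
#     scores = cross_val_score(k1 + k2 + k3 + k4 + k5 + k6, y_tr_A, mkl,
#                              n_folds=5, scoring='accuracy')
#     acc = np.mean(scores)
#     if not best_results or best_results['score'] < acc:
#         best_results = {'lam': lam, 'score': acc}
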
# EasyMKL-BASED
#############################################################################################
# fit EasyMKL with the best lambda on the concatenated lists of training kernels
clf = EasyMKL(learner=base_learner,
              lam=best_results['lam']).fit(k1 + k2 + k3 + k4 + k5 + k6, y_tr_A)
print(clf)
#############################################################################################
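# Optionally, inspect the learned kernel combination weights. Depending on the MKLpy
# version the attribute is clf.solution.weights (as in the multiclass examples below)
# or clf.weights (as in Example #2); this line is an addition to the original snippet:
# print('combination weights:', clf.solution.weights)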
# evaluate the solution
from sklearn.metrics import accuracy_score, roc_auc_score

y_pred = clf.predict(k8)  # predictions on the test kernels
y_score = clf.decision_function(k8)  # decision values used for ranking / ROC
accuracy = accuracy_score(y_te_A, y_pred)
roc_auc = roc_auc_score(y_te_A, y_score)
print('Accuracy score: %.3f, ROC AUC score: %.3f (test set, lambda=%.2f)' %
      (accuracy, roc_auc, best_results['lam']))
###############################################################################################
# confusion matrix and classification report
from sklearn.metrics import classification_report, confusion_matrix, cohen_kappa_score, roc_curve, auc

print(classification_report(y_te_A, y_pred))
cnf_matrix = confusion_matrix(y_te_A, y_pred)
print(cnf_matrix)
total = cnf_matrix.sum()
ACC = (cnf_matrix[0, 0] + cnf_matrix[1, 1]) / total  # overall accuracy from the confusion matrix
# sklearn puts true labels on rows and predictions on columns, so the recall
# (sensitivity) of class 0 is TP / (TP + FN) = cm[0, 0] / (cm[0, 0] + cm[0, 1])
sensitivity = recall = cnf_matrix[0, 0] / (cnf_matrix[0, 0] + cnf_matrix[0, 1])
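# The import above also brings in cohen_kappa_score, roc_curve and auc, which the
# excerpt never uses; a minimal sketch of how they could round out the evaluation
# (an addition, not part of the original example):
specificity = cnf_matrix[1, 1] / (cnf_matrix[1, 0] + cnf_matrix[1, 1])  # TN / (TN + FP)
kappa = cohen_kappa_score(y_te_A, y_pred)
fpr, tpr, thresholds = roc_curve(y_te_A, y_score)
print('Specificity: %.3f, Cohen kappa: %.3f, AUC: %.3f' % (specificity, kappa, auc(fpr, tpr)))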
Example #2
 # excerpt from a larger evaluation loop; it assumes roc_curve, f1_score, recall_score
 # and average_precision_score from sklearn.metrics, plus the fitted clfEasy, are in scope
 y_predMKL_tr = clfEasy.predict(KLtr)  # EasyMKL predictions on the training kernels
 average_kernel_results['fpr'].append(fpr_avg)
 average_kernel_results['tpr'].append(tpr_avg)
 average_kernel_results['train_date'].append(testDate)
 average_kernel_results['data_date'].append(dataDate)
 average_kernel_results['Average precision-recall score'].append(
     average_precision_score(ytr, y_score_tr))  # average_precision_score expects (y_true, y_score)
 average_kernel_results['thresholds'].append(thresholds_avg)
 average_kernel_results['test_recall'].append(
     recall_score(yte, y_pred_te, average='weighted'))
 average_kernel_results['train_recall'].append(
     recall_score(ytr, y_pred_tr, average='weighted'))
 fpr_avg = None
 tpr_avg = None
 y_scoreMKL_te = clfEasy.decision_function(KLte)  # decision values on the test kernels
 y_scoreMKL_tr = clfEasy.decision_function(KLtr)  # decision values on the training kernels
 fprMKL, tprMKL, thresholdsMKL = roc_curve(
     yte.ravel(), y_scoreMKL_te.ravel())
 MKL_results['fpr'].append(fprMKL)
 MKL_results['tpr'].append(tprMKL)
 MKL_results['f1_score'].append(
     f1_score(ytr, y_predMKL_tr, average='macro'))
 MKL_results['thresholds'].append(thresholdsMKL)
 MKL_results['weights'].append(clfEasy.weights)
 MKL_results['train_date'].append(testDate)
 MKL_results['data_date'].append(dataDate)
 MKL_results['Average precision-recall score'].append(
     average_precision_score(ytr, y_scoreMKL_tr))  # (y_true, y_score)
 y_predMKL_te = clfEasy.predict(KLte)  # EasyMKL predictions on the test kernels
 MKL_results['test_recall'].append(
     recall_score(yte, y_predMKL_te, average='weighted'))
Example #3
#MKL algorithms
from MKLpy.algorithms import AverageMKL, EasyMKL

print('training EasyMKL with one-vs-all multiclass strategy...', end='')
from sklearn.svm import SVC
base_learner = SVC(C=0.1)
clf = EasyMKL(lam=0.1, multiclass_strategy='ova',
              learner=base_learner).fit(KLtr, Ytr)
from MKLpy.multiclass import OneVsRestMKLClassifier, OneVsOneMKLClassifier
print('done')
print('the combination weights are:')
for sol in clf.solution:
    print('(%d vs all): ' % sol, clf.solution[sol].weights)

#evaluate the solution
from sklearn.metrics import accuracy_score, roc_auc_score
import numpy as np
y_pred = clf.predict(KLte)  #predictions
y_score = clf.decision_function(KLte)  #rank
accuracy = accuracy_score(Yte, y_pred)
print('Accuracy score: %.3f' % (accuracy))

print('training EasyMKL with one-vs-one multiclass strategy...', end='')
clf = EasyMKL(lam=0.1, multiclass_strategy='ovo',
              learner=base_learner).fit(KLtr, Ytr)
print('done')
print('the combination weights are:')
for sol in clf.solution:
    print('(%d vs %d): ' % (sol[0], sol[1]), clf.solution[sol].weights)
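
# AverageMKL is imported above but never used; a minimal sketch (an addition, not part
# of the original example) of the uniform-weight baseline for comparison, using the
# library's default base learner:
clf_avg = AverageMKL().fit(KLtr, Ytr)
y_pred_avg = clf_avg.predict(KLte)
print('AverageMKL accuracy score: %.3f' % accuracy_score(Yte, y_pred_avg))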
Example #4
def MultiView_learning():
    """MultiView learning"""
    from scipy import io
    import numpy

    print('loading dataset...', end='')

    training_data = io.loadmat(
        r"D:\CVProject\CBAM-keras-master\handcraft\features_with_pca_file_0202.mat"
    )
    length = len(training_data['array'][0])
    X = training_data['array'][:, 0:length - 2]  # feature columns (all but the last two)
    Y = training_data['array'][:, -1]            # labels in the last column
    print('done')

    # preprocess data
    print('preprocessing data...', end='')
    from MKLpy.preprocessing import normalization, rescale_01
    X = rescale_01(X)  # feature scaling in [0,1]
    X = normalization(X)  # ||X_i||_2^2 = 1

    # train/test split
    from sklearn.model_selection import train_test_split
    Xtr, Xte, Ytr, Yte = train_test_split(X,
                                          Y,
                                          test_size=.1,
                                          random_state=42,
                                          shuffle=True)

    print(numpy.array(Xtr).shape)
    print(numpy.array(Ytr).shape)

    print('done')
    print('Training on {0} samples, Testing on {1} samples'.format(
        len(Xtr), len(Xte)))

    print('computing RBF Kernels...', end='')

    from MKLpy.metrics import pairwise
    from MKLpy.generators import Multiview_generator

    X1_tr = numpy.array(Xtr[:, :2])  # time
    X2_tr = numpy.array(Xtr[:, 2:92])  # color
    X3_tr = numpy.array(Xtr[:, 92:124])  # Gabor
    X4_tr = numpy.array(Xtr[:, 124:156])  # lbp
    X5_tr = numpy.array(Xtr[:, 156:348])  # cloud
    X6_tr = numpy.array(Xtr[:, 348:432])  # haze
    X7_tr = numpy.array(Xtr[:, 432:603])  # contrast
    X8_tr = numpy.array(Xtr[:, 603:606])  # shadow
    X9_tr = numpy.array(Xtr[:, 606:608])  # snow
    X10_tr = numpy.array(Xtr[:, 608:])  # pca

    X1_te = numpy.array(Xte[:, :2])  # time
    X2_te = numpy.array(Xte[:, 2:92])  # color
    X3_te = numpy.array(Xte[:, 92:124])  # Gabor
    X4_te = numpy.array(Xte[:, 124:156])  # lbp
    X5_te = numpy.array(Xte[:, 156:348])  # cloud
    X6_te = numpy.array(Xte[:, 348:432])  # haze
    X7_te = numpy.array(Xte[:, 432:603])  # contrast
    X8_te = numpy.array(Xte[:, 603:606])  # shadow
    X9_te = numpy.array(Xte[:, 606:608])  # snow
    X10_te = numpy.array(Xte[:, 608:])  # pca

    views_tr = [X1_tr, X2_tr, X3_tr, X4_tr, X5_tr, X6_tr, X7_tr, X8_tr, X9_tr, X10_tr]
    views_te = [X1_te, X2_te, X3_te, X4_te, X5_te, X6_te, X7_te, X8_te, X9_te, X10_te]
    KLtr = Multiview_generator(views_tr, kernel=pairwise.rbf_kernel)
    KLte = Multiview_generator(views_te, views_tr, kernel=pairwise.rbf_kernel)

    print('done')

    from MKLpy.algorithms import AverageMKL, EasyMKL
    print('training EasyMKL with one-vs-all multiclass strategy...', end='')
    from sklearn.svm import SVC
    base_learner = SVC(C=8)
    clf = EasyMKL(lam=0.1, multiclass_strategy='ova',
                  learner=base_learner).fit(KLtr, Ytr)
    print('done')

    print('the combination weights are:')
    for sol in clf.solution:
        print('(%d vs all): ' % sol, clf.solution[sol].weights)

    from sklearn.metrics import accuracy_score, roc_auc_score, recall_score, confusion_matrix
    y_pred = clf.predict(KLte)  # predictions
    y_score = clf.decision_function(KLte)  # rank
    accuracy = accuracy_score(Yte, y_pred)
    print('Accuracy score: %.4f' % (accuracy))
    recall = recall_score(Yte, y_pred, average='macro')
    print('Recall score: %.4f' % (recall))
    cm = confusion_matrix(Yte, y_pred)
    print('Confusion matrix', cm)

    print('training EasyMKL with one-vs-one multiclass strategy...', end='')
    clf = EasyMKL(lam=0.1, multiclass_strategy='ovo',
                  learner=base_learner).fit(KLtr, Ytr)
    print('done')
    print('the combination weights are:')
    for sol in clf.solution:
        print('(%d vs %d): ' % (sol[0], sol[1]), clf.solution[sol].weights)

    y_pred = clf.predict(KLte)  # predictions
    y_score = clf.decision_function(KLte)  # rank
    accuracy = accuracy_score(Yte, y_pred)
    print('Accuracy score: %.4f' % (accuracy))
    recall = recall_score(Yte, y_pred, average='macro')
    print('Recall score: %.4f' % (recall))
    cm = confusion_matrix(Yte, y_pred)
    print('Confusion matrix', cm)
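

# A minimal usage sketch (an addition, not part of the original example): the function
# takes no arguments, so it can be invoked directly, provided the hard-coded .mat
# feature file above is available.
if __name__ == '__main__':
    MultiView_learning()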