# Model zoo for the experiments. Each entry is a triple:
#   (display name, feature-set list or None, estimator instance)
# NOTE(review): the second element is presumably the feature selections to
# evaluate the model on (None = all) — confirm against getClassifieresPerformances.
classifiers = [
    ("KNN", None, KNeighborsClassifier(n_neighbors=2)),
    ("Linear SVM", None, SVC(kernel="linear")),
    ("RBF SVM", None, SVC(gamma=2, C=1)),
    ("DT", None,
     DecisionTreeClassifier(min_samples_split=1024, max_depth=20)),
    ("RF", None,
     RandomForestClassifier(n_estimators=10, min_samples_split=1024,
                            max_depth=20)),
    ("AB", None, AdaBoostClassifier(random_state=13370)),
    # ("GP ARD", ["MFCC"],
    #  gp.GaussianProcessClassifier(
    #      kernel=ard_kernel(sigma=1.2, length_scale=np.array([1] * 1)))),
    ("GP-DP", ["MFCC", "All", "CIFE", "CFS"],
     gp.GaussianProcessClassifier(kernel=gp.kernels.DotProduct())),
    # Experiment notes from the original author:
    # - output the confidence level and the predictive variance for the
    #   dot product (the only kernel kept in the end)
    # - GP beats SVM in our experiment (qualitative advantages)
    # - only keep RBF, dot product and matern on the chart
    # - add a paragraph 'Processed Data'
    # - 1) generate the dataset with 526 features
    # - 2) report the predictive variance and predictive mean (best and
    #      worst) of some vectors from the dot product
]
# classify(X_train[:, bitVec], X_dev[:, bitVec])

# Run every configured classifier, then append the related-work baselines
# so they show up in the same tables/plots.
models_f1, models_performances = getClassifieresPerformances(
    classifiers, models_f1, models_performances)
# models_f1, models_performances = getClassifieresPerformancesByDefinedX(
#     classifiers, 'predict', models_f1, models_performances,
#     newTrainX, y_bin_train, newDevX)
models_f1, models_performances = addRelatedWork(models_f1, models_performances)

# Order both result lists by their second field (presumably the score —
# confirm against plot_f1/printPerformances) so output is ascending.
models_f1 = sorted(models_f1, key=lambda entry: entry[1])
models_performances = sorted(models_performances, key=lambda entry: entry[1])

plot_f1(models_f1)
printPerformances(models_performances)
# --- "Пример #2" ("Example #2") and "0": separator / vote-count residue from
# --- the code-sharing site this was scraped from; commented out so the file
# --- remains valid Python.
def showPerformance(models_f1, models_performances):
    """Render the evaluation results.

    Plots the per-model F1 scores and prints the performance table.

    Args:
        models_f1: sequence of per-model F1 entries, as consumed by plot_f1.
        models_performances: sequence of per-model performance entries, as
            consumed by printPerformances.
    """
    # Fix: original body was tab-indented while the rest of the file uses
    # spaces — mixing the two is a TabError in Python 3 (PEP 8 / PEP 617).
    plot_f1(models_f1)
    printPerformances(models_performances)