def classify_new_sounds(folder_of_test_sounds, target_class):
    """Score a trained SVM on a fresh folder of sounds labelled *target_class*.

    Descriptors for the test folder are loaded and preprocessed, every test
    sample is labelled *target_class*, and an SVM trained on the full
    descriptor database (reverbs included) is evaluated against them.

    Returns the F1 score on the new samples (as produced by ``F1Score``).
    """
    # Descriptors for the unseen sounds, keyed by the class we expect.
    test_descriptors = {target_class: load_description(folder_of_test_sounds)}
    test_features, test_labels, _test_feature_names = preprocessDescriptors(
        test_descriptors)
    # Every test sample belongs to the same class by construction.
    test_labels = np.array([target_class] * len(test_labels))

    # Full training database: no cap on size, reverberated versions included.
    train_descriptors = loadDescriptors(maximum='Inf', reverbs=True)
    train_features, train_labels, _train_feature_names = preprocessDescriptors(
        train_descriptors)
    classifier = trainSVM(train_features, train_labels, call=True)

    return F1Score(test_features, test_labels, classifier)
def classify_new_sounds(folder_of_test_sounds, target_class):
    """Evaluate how well the SVM recognises new sounds of *target_class*.

    The sounds in *folder_of_test_sounds* are described and preprocessed,
    forced to the single label *target_class*, and scored with an SVM
    trained on the complete descriptor database (reverbs included).

    Returns the resulting F1 score.
    """
    unseen = {target_class: load_description(folder_of_test_sounds)}
    feats_unseen, labels_unseen, _names_unseen = preprocessDescriptors(unseen)
    # All unseen samples share the one target label.
    labels_unseen = np.array([target_class] * len(labels_unseen))

    database = loadDescriptors(maximum='Inf', reverbs=True)
    feats_db, labels_db, _names_db = preprocessDescriptors(database)
    model = trainSVM(feats_db, labels_db, call=True)

    return F1Score(feats_unseen, labels_unseen, model)
def run_stats_analysis(maximum='Inf', reverbs=True):
    """Run 100 repeated train/test experiments and summarise the F1 scores.

    Loads descriptors (up to *maximum*, optionally with reverberated
    versions), preprocesses them, and repeats the experiment 100 times via
    ``test_repe``.

    Returns a 3-tuple: per-instrument mean F1, per-instrument standard
    deviation, and the sample count per instrument (presumably keyed by
    class label — semantics live in the ``*_per_instrument`` helpers).
    """
    descriptors = loadDescriptors(maximum, reverbs)
    features, labels, _feature_names = preprocessDescriptors(descriptors)
    # Overall mean/variance from test_repe are discarded; per-class stats
    # are recomputed from the individual runs below.
    f1_runs, _overall_mean, _overall_var = test_repe(100, features, labels)
    counts = count_instruments(labels)
    return mean_per_instrument(f1_runs), std_per_instrument(f1_runs), counts
def run_stats_analysis(maximum='Inf', reverbs=True):
    """Summarise classifier performance over 100 repeated experiments.

    Parameters
    ----------
    maximum : cap passed through to ``loadDescriptors`` ('Inf' = no cap).
    reverbs : whether reverberated versions are included in the database.

    Returns ``(mean, std, count)`` where mean and std are the
    per-instrument statistics of the repeated F1 scores and count is the
    number of samples seen per instrument.
    """
    data = loadDescriptors(maximum, reverbs)
    X, y, _names = preprocessDescriptors(data)
    # 100 repetitions; only the per-run scores are needed here.
    scores, _m, _v = test_repe(100, X, y)
    per_class_mean = mean_per_instrument(scores)
    per_class_std = std_per_instrument(scores)
    n_per_class = count_instruments(y)
    return per_class_mean, per_class_std, n_per_class
# -*- coding: utf-8 -*- from sklearn import svm import numpy as np import matplotlib.pyplot as plt from utils.FScore import F1Score from Identification.LoadDescriptors import loadAllDescriptors from Identification.PreprocessingDescriptors import preprocessDescriptors from Identification.TrainCvTest import separateDatabases Descriptors = loadAllDescriptors(reverbs=True) normalized_features, yClass, features_names = preprocessDescriptors(Descriptors) del Descriptors # Ya no lo voy a utilizar normalizedTrain, yTrain, normalizedCV, yCV, normalizedTest, yTest = separateDatabases(normalized_features, yClass) def test_data_size(training_features, training_classes, test_features, test_classes): index = np.arange(0, len(training_classes)) np.random.shuffle(index) test_size = np.linspace(0.1, 1, 50) * len(index) test_size = [int(i) for i in test_size] f_train = [] f_cv = [] clf = svm.SVC(C=1.833, gamma=0.1366, cache_size=1000) for iii in test_size: clf.fit(training_features[index[0:iii]], training_classes[index[0:iii]]) f_train = np.append(f_train, np.mean(F1Score(training_features[index[0:iii]],