Example #1
"""! 
@brief Example 20
@details Musical genre classification example: classification performance evaluation
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np
import utilities as ut
from pyAudioAnalysis.MidTermFeatures import directory_feature_extraction as dW

if __name__ == '__main__':
    # extract features, concatenate feature matrices and normalize:
    mw, stw = 2, .1
    f1, _, fn1 = dW("../data/musical_genres_8k/blues", mw, mw, stw, stw)
    f2, _, fn2 = dW("../data/musical_genres_8k/electronic", mw, mw, stw, stw)
    f3, _, fn3 = dW("../data/musical_genres_8k/jazz", mw, mw, stw, stw)
    x = np.concatenate((f1, f2, f3), axis=0)
    y = np.concatenate((np.zeros(f1.shape[0]), np.ones(f2.shape[0]),
                        2 * np.ones(f3.shape[0])))
    # train svm and get aggregated (average) confusion matrix, accuracy and f1
    cm, acc, f1 = ut.svm_train_evaluate(x, y, 10, C=2)
    # visualize performance measures
    ut.plotly_classification_results(cm, ["blues", "electronic", "jazz"])
    print(acc, f1)
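
The helper ut.svm_train_evaluate comes from the local utilities.py module, which is not listed in these examples. The sketch below shows one plausible implementation, assuming it runs stratified k-fold cross-validation of an RBF SVM with per-fold feature standardization and returns the fold-averaged confusion matrix, mean accuracy and mean macro-F1; the name svm_train_evaluate_sketch and these details are assumptions, not the actual helper.

import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score


def svm_train_evaluate_sketch(x, y, n_folds, C=1.0):
    """Cross-validate an RBF SVM (assumed behavior of ut.svm_train_evaluate):
    return the fold-averaged confusion matrix, accuracy and macro-F1."""
    labels = np.unique(y)
    cm = np.zeros((len(labels), len(labels)))
    accs, f1s = [], []
    skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=0)
    for train_idx, test_idx in skf.split(x, y):
        # normalize using statistics of the training folds only
        scaler = StandardScaler().fit(x[train_idx])
        clf = SVC(C=C, kernel="rbf")
        clf.fit(scaler.transform(x[train_idx]), y[train_idx])
        y_pred = clf.predict(scaler.transform(x[test_idx]))
        cm += confusion_matrix(y[test_idx], y_pred, labels=labels)
        accs.append(accuracy_score(y[test_idx], y_pred))
        f1s.append(f1_score(y[test_idx], y_pred, average="macro"))
    return cm / n_folds, np.mean(accs), np.mean(f1s)

Under this reading, ut.svm_train_evaluate(x, y, 10, C=2) above corresponds to a 10-fold evaluation with an SVM penalty of C=2.
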
Example #2
"""! 
@brief Example 23
@details Audio event detection example: classification performance evaluation
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np
import utilities as ut
from pyAudioAnalysis.MidTermFeatures import directory_feature_extraction as dW


if __name__ == '__main__':
    # extract features, concatenate feature matrices and normalize:
    f1, _, fn1 = dW("../data/activity_sounds/cupboards", 1, 1, 0.05, 0.05)
    f2, _, fn2 = dW("../data/activity_sounds/door", 1, 1, 0.05, 0.05)
    f3, _, fn3 = dW("../data/activity_sounds/silence", 1, 1, 0.05, 0.05)
    f4, _, fn4 = dW("../data/activity_sounds/walk", 1, 1, 0.05, 0.05)
    x = np.concatenate((f1, f2, f3, f4), axis=0)
    y = np.concatenate((np.zeros(f1.shape[0]), np.ones(f2.shape[0]),
                       2 * np.ones(f3.shape[0]), 3 * np.ones(f4.shape[0])))
    print(x.shape, y.shape)
    # train svm and get aggregated (average) confusion matrix, accuracy and f1
    cm, acc, f1 = ut.svm_train_evaluate(x, y, 2, C=2)
    # visualize performance measures
    ut.plotly_classification_results(cm, ["cupboards", "door", "silence",
                                          "walk"])
    print(acc, f1)
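
Similarly, ut.plotly_classification_results is only imported from utilities.py here. Given its arguments (an aggregated confusion matrix and the class names), a reasonable sketch is a Plotly figure with a confusion-matrix heatmap and per-class precision/recall bars; the name plotly_classification_results_sketch and the exact layout are assumptions.

import numpy as np
import plotly.graph_objs as go
from plotly.subplots import make_subplots


def plotly_classification_results_sketch(cm, class_names):
    """Plot an (assumed) confusion-matrix heatmap and per-class precision/recall."""
    cm = np.asarray(cm, dtype=float)
    # rows of cm are true classes, columns are predicted classes
    precision = np.diag(cm) / (cm.sum(axis=0) + np.finfo(float).eps)
    recall = np.diag(cm) / (cm.sum(axis=1) + np.finfo(float).eps)
    fig = make_subplots(rows=1, cols=2,
                        subplot_titles=["confusion matrix", "precision / recall"])
    fig.add_trace(go.Heatmap(z=cm, x=class_names, y=class_names,
                             colorscale="Blues"), row=1, col=1)
    fig.add_trace(go.Bar(x=class_names, y=precision, name="precision"),
                  row=1, col=2)
    fig.add_trace(go.Bar(x=class_names, y=recall, name="recall"),
                  row=1, col=2)
    fig.show()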

Example #3
"""! 
@brief Example 24
@details Soundscape quality classification (through an SVM classifier)
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np
from pyAudioAnalysis.MidTermFeatures import directory_feature_extraction as dW
import utilities as ut

if __name__ == '__main__':
    # get features from folders (all classes):
    f1, _, fn1 = dW("../data/soundScape_small/1/", 2, 1, 0.1, 0.1)
    f3, _, fn3 = dW("../data/soundScape_small/3/", 2, 1, 0.1, 0.1)
    f5, _, fn5 = dW("../data/soundScape_small/5/", 2, 1, 0.1, 0.1)

    x = np.concatenate((f1, f3, f5), axis=0)
    y = np.concatenate((np.zeros(f1.shape[0]), 1 * np.ones(f3.shape[0]),
                        2 * np.ones(f5.shape[0])))
    # train svm and get aggregated (average) confusion matrix, accuracy and f1
    cm, acc, f1 = ut.svm_train_evaluate(x, y, 10, C=10, use_regressor=False)
    # visualize performance measures
    ut.plotly_classification_results(cm, ["q_1", "q_3", "q_5"])
    print(acc, f1)
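
The use_regressor=False argument suggests the helper can also treat this task as regression, which would suit these labels: the soundscape quality levels 1/3/5 (encoded 0/1/2 above) are ordinal. A minimal sketch of such a regression variant with scikit-learn's SVR is shown below; its name and its correspondence to the helper's regression mode are assumptions.

import numpy as np
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import mean_absolute_error


def svr_train_evaluate_sketch(x, y, n_folds, C=1.0):
    """Cross-validate an RBF SVR and return the mean absolute error over folds
    (assumed regression counterpart of ut.svm_train_evaluate)."""
    maes = []
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=0)
    for train_idx, test_idx in kf.split(x):
        # standardize features with training-fold statistics only
        scaler = StandardScaler().fit(x[train_idx])
        reg = SVR(C=C, kernel="rbf")
        reg.fit(scaler.transform(x[train_idx]), y[train_idx])
        y_pred = reg.predict(scaler.transform(x[test_idx]))
        maes.append(mean_absolute_error(y[test_idx], y_pred))
    return np.mean(maes)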