# ===== Example #1 (musical genre classification, 2D features) =====
"""! 
@brief Example 19
@details Musical genre classification example
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np, plotly, plotly.graph_objs as go
from pyAudioAnalysis.audioFeatureExtraction import dirWavFeatureExtraction as dW
from sklearn.svm import SVC
import utilities as ut
# The two hand-picked features used for the 2D visualization below.
name_1, name_2 = "spectral_entropy_std", "chroma_std_std"
# Plotly layout: one axis per selected feature.
layout = go.Layout(
    title='Musical Genre Classification Example',
    xaxis=dict(title=name_1),
    yaxis=dict(title=name_2),
)

if __name__ == '__main__':
    # get features from folders (all classes):
    # dW returns (per-file feature matrix, wav file list, feature names) --
    # see the unpacking in the sibling example that iterates the second
    # return value as file names. Numeric args are presumably mid-term
    # window/step and short-term window/step in seconds -- TODO confirm
    # against dirWavFeatureExtraction's signature.
    f1, _, fn1 = dW("../data/musical_genres_8k/blues", 2, 1, 0.1, 0.1)
    f2, _, fn2 = dW("../data/musical_genres_8k/electronic", 2, 1, 0.1, 0.1)
    f3, _, fn3 = dW("../data/musical_genres_8k/jazz", 2, 1, 0.1, 0.1)
    # plot histograms for each feature and normalize
    ut.plot_feature_histograms([f1, f2, f3], fn1, ["blues", "electro", "jazz"])
    # concatenate features to extract overall mean and std ...
    # Keep only the two selected feature columns (name_1, name_2) per class;
    # resulting matrices are (n_files, 2).
    f1 = np.array([f1[:, fn1.index(name_1)], f1[:, fn1.index(name_2)]]).T
    f2 = np.array([f2[:, fn1.index(name_1)], f2[:, fn1.index(name_2)]]).T
    f3 = np.array([f3[:, fn1.index(name_1)], f3[:, fn1.index(name_2)]]).T
    f = np.concatenate((f1, f2, f3), axis = 0)
    # Global (all-class) mean/std so every class is normalized identically:
    mean, std = f.mean(axis=0), np.std(f, axis=0)
    f1 = (f1 - mean) / std; f2 = (f2 - mean) / std;  f3 = (f3 - mean) / std
    f = (f - mean) / std
    # plot selected 2D features
    plt1 = go.Scatter(x=f1[:, 0], y=f1[:, 1], mode='markers', name="blues",
                      marker=dict(size=10,color='rgba(255, 182, 193, .9)',))
    # NOTE(review): this chunk is truncated here; the remaining scatter
    # traces and the classifier code are outside this view.
# ===== Example #2 (activity detection, 2D features) =====
@details Audio event detection: features discrimination and
2D-feature classification
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np, plotly, plotly.graph_objs as go
from pyAudioAnalysis.audioFeatureExtraction import dirWavFeatureExtraction as dW
from sklearn.svm import SVC
import utilities as ut
# The two hand-picked features used for the 2D visualization below.
name_1, name_2 = "spectral_spread_std", "mfcc_5_mean"
# Plotly layout: one axis per selected feature.
layout = go.Layout(
    title='Activity Detection Example',
    xaxis=dict(title=name_1),
    yaxis=dict(title=name_2),
)

if __name__ == '__main__':
    # get features from folders (all classes):
    # dW returns (per-file feature matrix, wav file list, feature names).
    # fn1 is deliberately overwritten each call -- the feature-name list is
    # presumably identical across folders; verify if folders differ.
    f1, _, fn1 = dW("../data/activity_sounds/cupboards", 1, 1, 0.05, 0.05)
    f2, _, fn1 = dW("../data/activity_sounds/door", 1, 1, 0.05, 0.05)
    f3, _, fn1 = dW("../data/activity_sounds/silence", 1, 1, 0.05, 0.05)
    f4, _, fn1 = dW("../data/activity_sounds/walk", 1, 1, 0.05, 0.05)

    # plot histograms for each feature and normalize
    ut.plot_feature_histograms([f1, f2, f3, f4], fn1,
                               ["cupboards", "door", "silence", "walk"])
    # concatenate features to extract overall mean and std ...
    # Keep only the two selected feature columns (name_1, name_2) per class;
    # resulting matrices are (n_files, 2).
    f1 = np.array([f1[:, fn1.index(name_1)], f1[:, fn1.index(name_2)]]).T
    f2 = np.array([f2[:, fn1.index(name_1)], f2[:, fn1.index(name_2)]]).T
    f3 = np.array([f3[:, fn1.index(name_1)], f3[:, fn1.index(name_2)]]).T
    f4 = np.array([f4[:, fn1.index(name_1)], f4[:, fn1.index(name_2)]]).T

    # Global mean/std over all classes (normalization itself is applied in
    # the part of this script truncated from this view).
    f = np.concatenate((f1, f2, f3, f4), axis=0)
    mean, std = f.mean(axis=0), np.std(f, axis=0)
# ===== Example #3 (speech vs. music classification, 2D features) =====
"""! 
@brief Example 18
@details speech music classification example
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np, plotly, plotly.graph_objs as go
from pyAudioAnalysis.audioFeatureExtraction import dirWavFeatureExtraction as dW
from sklearn.svm import SVC
import utilities as ut
# The two hand-picked features used for the 2D visualization below.
name_1, name_2 = "mfcc_3_std", "energy_entropy_mean"
# Plotly layout: one axis per selected feature.
layout = go.Layout(
    title='Speech Music Classification Example',
    xaxis=dict(title=name_1),
    yaxis=dict(title=name_2),
)

if __name__ == '__main__':
    # get features from folders (all classes):
    # dW returns (per-file feature matrix, wav file list, feature names).
    f1, _, fn1 = dW("../data/speech_music/speech", 1, 1, 0.1, 0.1)
    f2, _, fn2 = dW("../data/speech_music/music", 1, 1, 0.1, 0.1)
    # plot histograms for each feature and normalize
    ut.plot_feature_histograms([f1, f2], fn1, ["speech", "music"])
    # concatenate features to extract overall mean and std ...
    # Keep only the two selected feature columns (name_1, name_2) per class;
    # resulting matrices are (n_files, 2).
    f1 = np.array([f1[:, fn1.index(name_1)], f1[:, fn1.index(name_2)]]).T
    f2 = np.array([f2[:, fn1.index(name_1)], f2[:, fn1.index(name_2)]]).T
    f = np.concatenate((f1, f2), axis = 0)
    mean, std = f.mean(axis=0), np.std(f, axis=0)
    # z-normalize both classes with the global mean/std:
    f1 = (f1 - mean) / std; f2 = (f2 - mean) / std; f = (f - mean) / std
    # plot selected 2D features
    plt1 = go.Scatter(x=f1[:, 0], y=f1[:, 1], mode='markers', name="speech")
    plt2 = go.Scatter(x=f2[:, 0], y=f2[:, 1], mode='markers', name="music")
    # get classification decisions for grid
    # labels: 0 = speech, 1 = music
    y = np.concatenate((np.zeros(f1.shape[0]), np.ones(f2.shape[0])))
    # NOTE(review): the classifier is constructed here but fit/plotting
    # happens in the portion of this script truncated from this view.
    cl = SVC(kernel='rbf', C=0.1)
# ===== Example #4 (music regression/analysis; chunk starts mid-file) =====
    return go.Scatter(x=cbins, y=h_test, name=name)


if __name__ == '__main__':
    arg = parseArguments()
    target_type = arg.target[0]
    # Reuse cached features if a previous run serialized them to disk:
    if os.path.isfile(target_type + ".bin"):
        x, y, filenames = joblib.load(target_type + ".bin")
    else:
        # Read ground truth (file name -> target value) from CSV:
        gt = {}
        with open('../data/music_data_small/{}.csv'.format(target_type)) \
                as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                gt[row[0]] = row[1]
        # dW returns (per-file feature matrix, wav file list, feature names):
        f, f_names, fn1 = dW("../data/music_data_small", 2, 2, 0.1, 0.1)
        # Keep only files that have a ground-truth entry:
        x, y, filenames = [], [], []
        for i_f, f_name in enumerate(f_names):
            if os.path.basename(f_name) in gt:
                x.append(f[i_f])
                # BUG FIX: append the current file name (f_name), not the
                # whole f_names list, so filenames aligns 1:1 with x and y.
                filenames.append(f_name)
                y.append(float(gt[os.path.basename(f_name)]))
        x = np.array(x)
        y = np.array(y)
        # Cache for the next run:
        joblib.dump((x, y, filenames), target_type + ".bin")

    figs = plotly.tools.make_subplots(rows=1,
                                      cols=2,
                                      subplot_titles=[
                                          "Distribution of real "
                                          "y and predicted y",
# ===== Example #5 (musical genre classification performance) =====
"""! 
@brief Example 20
@details Musical genre classification example. Classification performance
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np
import utilities as ut
from pyAudioAnalysis.audioFeatureExtraction import dirWavFeatureExtraction as dW

if __name__ == '__main__':
    # extract features, concatenate feature matrices and normalize:
    # mw = mid-term window/step, stw = short-term window/step (seconds) --
    # presumably; verify against dirWavFeatureExtraction's signature.
    mw, stw = 2, .1
    # dW returns (per-file feature matrix, wav file list, feature names):
    f1, _, fn1 = dW("../data/musical_genres_8k/blues", mw, mw, stw, stw)
    f2, _, fn2 = dW("../data/musical_genres_8k/electronic", mw, mw, stw, stw)
    f3, _, fn3 = dW("../data/musical_genres_8k/jazz", mw, mw, stw, stw)
    x = np.concatenate((f1, f2, f3), axis=0)
    # Labels: 0 = blues, 1 = electronic, 2 = jazz.
    # BUG FIX: the class-2 label count must match f3 (jazz), not f2 --
    # otherwise y's length diverges from x's row count whenever the
    # electronic and jazz folders contain different numbers of files.
    y = np.concatenate((np.zeros(f1.shape[0]), np.ones(f2.shape[0]),
                        2 * np.ones(f3.shape[0])))
    # train svm and get aggregated (average) confusion matrix, accuracy and f1
    cm, acc, f1 = ut.svm_train_evaluate(x, y, 10, C=2)
    # visualize performance measures
    ut.plotly_classification_results(cm, ["blues", "electronic", "jazz"])
    print(acc, f1)
# ===== Example #6 (soundscape quality classification) =====
"""! 
@brief Example 24
@details Soundscape quality classification (through svm classifier)
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np
from pyAudioAnalysis.audioFeatureExtraction import dirWavFeatureExtraction as dW
import utilities as ut

if __name__ == '__main__':
    # Per-file mid-term features for each soundscape-quality class (1, 3, 5).
    # dW returns (per-file feature matrix, wav file list, feature names).
    f1, _, fn1 = dW("../data/soundScape_small/1/", 2, 1, 0.1, 0.1)
    f3, _, fn1 = dW("../data/soundScape_small/3/", 2, 1, 0.1, 0.1)
    f5, _, fn1 = dW("../data/soundScape_small/5/", 2, 1, 0.1, 0.1)

    # Stack all samples and build matching class labels (0.0, 1.0, 2.0):
    class_feats = [f1, f3, f5]
    x = np.concatenate(class_feats, axis=0)
    y = np.concatenate([idx * np.ones(cf.shape[0])
                        for idx, cf in enumerate(class_feats)])
    # 10-fold SVM training/evaluation -> averaged confusion matrix,
    # accuracy and f1 score:
    cm, acc, f1 = ut.svm_train_evaluate(x, y, 10, C=10, use_regressor=False)
    # Visualize performance measures:
    ut.plotly_classification_results(cm, ["q_1", "q_3", "q_5"])
    print(acc, f1)