# Example no. 1
# Train an SVM audio classifier on two class directories ("1/" and "2/").
# NOTE(review): the numeric args are presumably mid-term window/step (1.0, 1.0)
# and short-term window/step (0.2, 0.2) in seconds — confirm against the
# aT.extract_features_and_train signature. Model is saved as "temp"; the final
# True presumably enables beat-feature computation.
aT.extract_features_and_train([root_data_path + "1/", root_data_path + "2/"],
                              1.0, 1.0, 0.2, 0.2, "svm", "temp", True)

print("\n\n\n * * * TEST 5 * * * \n\n\n")
# Fixed-window segmentation + classification of scottish.wav using a
# pretrained svm_rbf model, compared against the .segments ground-truth file.
# Returns per-window class flags, the class list, accuracy, and a confusion
# matrix (CM).
[flagsInd, classesAll, acc, CM] = aS.mid_term_file_classification(
    root_data_path + "scottish.wav", root_data_path + "models/svm_rbf_sm",
    "svm_rbf", True, root_data_path + 'pyAudioAnalysis/data/scottish.segments')

print("\n\n\n * * * TEST 6 * * * \n\n\n")
# Train an HMM segmenter from a single annotated wav/.segments pair
# (saved as 'hmmTemp1'), and a second one from a directory of annotated
# files (saved as 'hmmTemp2'). The 1.0, 1.0 args are presumably mid-term
# window and step in seconds — TODO confirm.
aS.train_hmm_from_file(root_data_path + 'radioFinal/train/bbc4A.wav',
                       root_data_path + 'radioFinal/train/bbc4A.segments',
                       'hmmTemp1', 1.0, 1.0)
aS.train_hmm_from_directory(root_data_path + 'radioFinal/small', 'hmmTemp2',
                            1.0, 1.0)
# Evaluate both HMMs on the same file against the same ground truth.
aS.hmm_segmentation(root_data_path + 'pyAudioAnalysis/data//scottish.wav',
                    'hmmTemp1', True, root_data_path +
                    'pyAudioAnalysis/data//scottish.segments')  # test 1
aS.hmm_segmentation(root_data_path + 'pyAudioAnalysis/data//scottish.wav',
                    'hmmTemp2', True, root_data_path +
                    'pyAudioAnalysis/data//scottish.segments')  # test 2

print("\n\n\n * * * TEST 7 * * * \n\n\n")
# Train an SVM-RBF regression model on the speechEmotion dataset
# (mid-term 1 s windows, 50 ms short-term frames), saved as "temp.mod",
# without beat features.
aT.feature_extraction_train_regression(root_data_path +
                                       "pyAudioAnalysis/data/speechEmotion",
                                       1,
                                       1,
                                       0.050,
                                       0.050,
                                       "svm_rbf",
                                       "temp.mod",
                                       compute_beat=False)
def main(argv):
    """Benchmark selected pyAudioAnalysis operations against realtime.

    argv[1] selects the operation (e.g. "-shortTerm", "-classifyFile",
    "-mtClassify", "-hmmSegmentation", "-silenceRemoval", "-thumbnailing",
    "-diarization-noLDA", "-diarization-LDA").  Each branch repeats the
    operation ``nExp`` times (module-level constant, defined elsewhere) and
    prints a "<audio duration> / <wall time>" realtime factor.

    NOTE(review): the original used Python 2 ``print`` statements, which are
    syntax errors under Python 3 and inconsistent with the rest of this file;
    they are converted to ``print()`` calls here with identical output.
    """
    if argv[1] == "-shortTerm":
        for i in range(nExp):
            [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav")
            duration = x.shape[0] / float(Fs)
            t1 = time.time()
            F = MidTermFeatures.short_term_feature_extraction(
                x, Fs, 0.050 * Fs, 0.050 * Fs)
            t2 = time.time()
            perTime1 = duration / (t2 - t1)
            print("short-term feature extraction: {0:.1f} x realtime".format(
                perTime1))
    elif argv[1] == "-classifyFile":
        for i in range(nExp):
            [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav")
            duration = x.shape[0] / float(Fs)
            t1 = time.time()
            aT.file_classification("diarizationExample.wav", "svmSM", "svm")
            t2 = time.time()
            perTime1 = duration / (t2 - t1)
            print("Mid-term feature extraction + classification \t {0:.1f} x realtime".format(
                perTime1))
    elif argv[1] == "-mtClassify":
        for i in range(nExp):
            [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav")
            duration = x.shape[0] / float(Fs)
            t1 = time.time()
            [flagsInd, classesAll,
             acc] = aS.mid_term_file_classification("diarizationExample.wav",
                                                    "svmSM", "svm", False, '')
            t2 = time.time()
            perTime1 = duration / (t2 - t1)
            print("Fix-sized classification - segmentation \t {0:.1f} x realtime".format(
                perTime1))
    elif argv[1] == "-hmmSegmentation":
        for i in range(nExp):
            [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav")
            duration = x.shape[0] / float(Fs)
            t1 = time.time()
            aS.hmm_segmentation('diarizationExample.wav', 'hmmRadioSM', False,
                                '')
            t2 = time.time()
            perTime1 = duration / (t2 - t1)
            print("HMM-based classification - segmentation \t {0:.1f} x realtime".format(
                perTime1))
    elif argv[1] == "-silenceRemoval":
        for i in range(nExp):
            [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav")
            duration = x.shape[0] / float(Fs)
            t1 = time.time()
            # NOTE(review): the file is deliberately re-read inside the timed
            # region, so I/O is included in this benchmark — confirm intended.
            [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav")
            segments = aS.silence_removal(x,
                                          Fs,
                                          0.050,
                                          0.050,
                                          smooth_window=1.0,
                                          Weight=0.3,
                                          plot=False)
            t2 = time.time()
            perTime1 = duration / (t2 - t1)
            print("Silence removal \t {0:.1f} x realtime".format(perTime1))
    elif argv[1] == "-thumbnailing":
        for i in range(nExp):
            [Fs1, x1] = audioBasicIO.read_audio_file("scottish.wav")
            duration1 = x1.shape[0] / float(Fs1)
            t1 = time.time()
            [A1, A2, B1, B2,
             Smatrix] = aS.music_thumbnailing(x1, Fs1, 1.0, 1.0,
                                              15.0)  # find thumbnail endpoints
            t2 = time.time()
            perTime1 = duration1 / (t2 - t1)
            print("Thumbnail \t {0:.1f} x realtime".format(perTime1))
    elif argv[1] == "-diarization-noLDA":
        for i in range(nExp):
            [Fs1, x1] = audioBasicIO.read_audio_file("diarizationExample.wav")
            duration1 = x1.shape[0] / float(Fs1)
            t1 = time.time()
            # LDAdim=0 disables the LDA dimensionality-reduction step.
            aS.speaker_diarization("diarizationExample.wav",
                                   4,
                                   LDAdim=0,
                                   PLOT=False)
            t2 = time.time()
            perTime1 = duration1 / (t2 - t1)
            print("Diarization \t {0:.1f} x realtime".format(perTime1))
    elif argv[1] == "-diarization-LDA":
        for i in range(nExp):
            [Fs1, x1] = audioBasicIO.read_audio_file("diarizationExample.wav")
            duration1 = x1.shape[0] / float(Fs1)
            t1 = time.time()
            aS.speaker_diarization("diarizationExample.wav", 4, PLOT=False)
            t2 = time.time()
            perTime1 = duration1 / (t2 - t1)
            print("Diarization \t {0:.1f} x realtime".format(perTime1))
# Example no. 3
def segmentclassifyFileWrapperHMM(wavFile, hmmModelName):
    """Segment a wav file with a pretrained HMM model and plot the result.

    The ground-truth annotation path is derived from the wav path by
    swapping the ".wav" suffix for ".segments".
    """
    ground_truth_path = wavFile.replace(".wav", ".segments")
    aS.hmm_segmentation(wavFile, hmmModelName,
                        plot_results=True, gt_file=ground_truth_path)