Example #1
def train(self):
    print('train classifier')
    trainer.featureAndTrain(
        listOfDirs=self.class_dir_list,
        mtWin=1.0,
        mtStep=1.0,
        stWin=trainer.shortTermWindow,
        stStep=trainer.shortTermStep,
        classifierType=self.classifier_type,
        modelName=self.classifier_path
    )
    print('classifier trained')
Example #2
from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain([
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/_background_noise_",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/one",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/two",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/three",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/four",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/five",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/six",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/seven",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/eight",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/nine",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/yes",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/wav/speech_commands/no"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp", False)
aT.fileClassification(
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/my-recordings/yes/1529689771.331.wav",
    "svmSMtemp", "svm")
Example #3
from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain(["classifierData/music","classifierData/speech"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp", False)
aT.fileClassification("data/doremi.wav", "svmSMtemp","svm")
Example #4
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioSegmentation as aS
from os import path

import os
import sys

modelName = sys.argv[1]  # Argument 1 is the classifier type (e.g. "svm"), reused as the model name.

mtw = float(sys.argv[2])  # Command line argument 2 is the mid-term window.
mts = float(sys.argv[3])  # Command line argument 3 is the mid-term step.

aT.featureAndTrain(["SetA/Good/", "SetA/Bad/"], mtw, mts, aT.shortTermWindow,
                   aT.shortTermStep, modelName, "Models/" + modelName)

for fileName in [
        f for f in os.listdir("SetB/Good/")
        if path.isfile(path.join("SetB/Good/", f))
]:
    print "Classification result for good file: " + fileName + " is " + aT.fileClassification(
        "SetB/Good/" + fileName, "Models/" + modelName, modelName)

for fileName in [
        f for f in os.listdir("SetB/Bad/")
        if path.isfile(path.join("SetB/Bad/", f))
]:
    print "Classification result for bad file: " + fileName + " is " + aT.fileClassification(
        "SetB/Bad/" + fileName, "Models/" + modelName, modelName)
Example #5
                            "sm", "movie8", "speakers", "speaker-gender",
                            "music-genre6", "4class"
                        ],
                        help="Classification task")
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parseArguments()
    root_data_path = args.data_folder
    classifier_type = args.classifier_type

    if args.task == "sm":
        aT.featureAndTrain(
            [root_data_path + "SM/speech", root_data_path + "SM/music"], 1.0,
            1.0, 0.05, 0.05, classifier_type, classifier_type + "_sm", False)
    elif args.task == "movie8":
        aT.featureAndTrain([
            root_data_path + "movieSegments/8-class/Speech",
            root_data_path + "movieSegments/8-class/Music",
            root_data_path + "movieSegments/8-class/Others1",
            root_data_path + "movieSegments/8-class/Others2",
            root_data_path + "movieSegments/8-class/Others3",
            root_data_path + "movieSegments/8-class/Shots",
            root_data_path + "movieSegments/8-class/Fights",
            root_data_path + "movieSegments/8-class/Screams"
        ], 1.0, 1.0, 0.05, 0.05, classifier_type,
                           classifier_type + "_movie8class", False)
    elif args.task == "speakers":
        aT.featureAndTrain([
Example #6
import os
import sys
from pyAudioAnalysis import audioFeatureExtraction as aF
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioBasicIO


#aT.featureAndTrain([""], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmMusicGenre3", True)
if __name__ == '__main__':

    #train
    aT.featureAndTrain(['Silence/','Speech/', 'Music/', 'Activity/'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, 'svm', 'svmModelTest', False)
Example #7
import os
from pyAudioAnalysis import audioTrainTest as aT


def train_slur_detector():
    aT.featureAndTrain([os.path.join('/home', 'adi', 'HackDuke', 'Audio_Train', 'Normal'), os.path.join('/home', 'adi', 'HackDuke', 'Audio_Train', 'Slurred')], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, 'randomforest', 'rfTemp', False)
    aT.featureAndTrain([os.path.join('/home', 'adi', 'HackDuke', 'Audio_Train', 'Normal'), os.path.join('/home', 'adi', 'HackDuke', 'Audio_Train', 'Slurred')], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, 'svm_rbf', 'svmRbfTemp', False)
Example #8
def trainClassifierWrapper(method, beat_feats, directories, model_name):
    if len(directories) < 2:
        raise Exception("At least 2 directories are needed")
    aT.featureAndTrain(directories, 1, 1, aT.shortTermWindow, aT.shortTermStep,
                       method.lower(), model_name, compute_beat=beat_feats)
Example #9
# Model training procedure
from pyAudioAnalysis import audioTrainTest as aT
import nn

# Classifier type and model file name
model = "svm"
modelName = "emotionModelData1SVN"

# Call the pyAudioAnalysis package directly
# https://github.com/tyiannak/pyAudioAnalysis/wiki/4.-Classification-and-Regression
aT.featureAndTrain([
    "/Users/aaa/Documents/python/wav/labels/angry",
    "/Users/aaa/Documents/python/wav/labels/neutral"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, model, modelName, False)

# How to invoke training of the neural-network model
#nn.trainNN(["/Users/aaa/Documents/python/wav/labels/angry",
#       "/Users/aaa/Documents/python/wav/labels/neutral"],
#       1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, False)

# Evaluate the model's precision, recall, etc.
import os
from os import listdir
from os.path import isfile, join

# Path (folder) of the test audio files
dataPath = "/Users/aaa/Documents/python/wav/labels/test/"
files = [
    join(dataPath, f) for f in listdir(dataPath) if isfile(join(dataPath, f))
]
print(files)
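The example stops after listing the test files. A minimal sketch of the evaluation it announces, assuming (hypothetically) that each test file encodes its true label as a filename prefix such as angry_01.wav:

# Hedged sketch: classify each test file and tally a simple accuracy.
# The filename-prefix labeling scheme is an assumption, not from the original.
correct = 0
for f in files:
    class_id, probs, class_names = aT.fileClassification(f, modelName, model)
    if os.path.basename(f).startswith(class_names[int(class_id)]):
        correct += 1
print("accuracy: {:.2f}".format(correct / float(len(files))))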
Example #10
from pyAudioAnalysis import audioTrainTest as aT

aT.featureAndTrain(["./audio/metal", "./audio/dance"], 1.0, 1.0,
                   aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp",
                   False)
Example #11
aT.featureAndTrain(
    species,
    # [
    #     "Bent-Beak-Riffraff",
    #     "Blue-collared-Zipper",
    #     "Bombadil",
    #     "Broad-winged-Jojo",
    #     "Canadian-Cootamum",
    #     "Carries-Champagne-Pipit",
    #     "Darkwing-Sparrow",
    #     "Eastern-Corn-Skeet",
    #     "Green-Tipped-Scarlet-Pipit",
    #     "Lesser-Birchbeere",
    #     "Orange-Pine-Plover",
    #     "Ordinary-Snape",
    #     "Pinkfinch",
    #     "Purple-Tooting-Tout",
    #     "Qax",
    #     "Queenscoat",
    #     "Rose-Crested-Blue-Pipit",
    #     "Scrawny-Jay",
    #     "Vermillion-Trillian",
    # ],
    1.0,
    1.0,
    aT.shortTermWindow,
    aT.shortTermStep,
    "svm",
    "svmSMtemp",
    False,
)
Example #12
import os
from pyAudioAnalysis import audioTrainTest as aT

os.chdir('/export/home/u16/smcl/AAPB')


aT.featureAndTrain(['Reagan','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svm_Reagan_UBM_all", False)

aT.featureAndTrain(['Reagan','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", "gradientboosting_Reagan_UBM_all", False)

aT.featureAndTrain(['Reagan','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", "extratrees_Reagan_UBM_all", False)

aT.featureAndTrain(['Reagan','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "randomforest", "randomforest_Reagan_UBM_all", False)

aT.featureAndTrain(['Reagan','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "knn_Reagan_UBM_all", False)




import os
from pyAudioAnalysis import audioTrainTest as aT

os.chdir('/export/home/u16/smcl/AAPB')


aT.featureAndTrain(['Child','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svm_Child_UBM_all", False)

aT.featureAndTrain(['Child','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", "gradientboosting_Child_UBM_all", False)

aT.featureAndTrain(['Child','test_set_616_clips'], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", "extratrees_Child_UBM_all", False)
Example #13
import sys
sys.path.append(r'C:\Python27\Lib\site-packages')
from pyAudioAnalysis import audioTrainTest as aT
import os
import pydub
pydub.AudioSegment.converter = r"C:\ffmpeg\bin\ffmpeg.exe"  # path to the ffmpeg executable
import numpy as np

testData = "testData"
classifierData = [
    "classifierData/cachorro", "classifierData/gato", "classifierData/onca"
]

# Training phase
aT.featureAndTrain(classifierData, 1.0, 1.0, aT.shortTermWindow,
                   aT.shortTermStep, "svm", "svmSMtemp", False)

# Testing phase
fileout = open("result.txt", "w")

for filename in os.listdir(testData):
    filename = testData + "/" + filename
    Result, P, classNames = aT.fileClassification(filename, "svmSMtemp", "svm")
    P = P * 100
    winner = np.argmax(P)  # pick the class with the highest probability
    print("Arquivo: " + filename + "; Animal: " + classNames[winner] +
          "; Probabilidade: " + str(P[winner]) + "%")
    fileout.write("Arquivo: " + filename + "; Animal: " + classNames[winner] +
                  "; Probabilidade: " + str(P[winner]) + "%\n")
fileout.close()
Example #14
File: train.py  Project: DrGoZF/irate
# Model-training call - invokes the pyAudioAnalysis function directly; see voiceEmotionAnalysis.py for details

from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain([
    "/Users/aaa/Documents/python/wav/labels/angry",
    "/Users/aaa/Documents/python/wav/labels/neutral"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm",
                   "emotionModelData1SVM", False)
Example #15
def train_deception_model(
    truth_audio_path="../../training-data/deception-audio-datasets/truth_audio",
    lie_audio_path="../../training-data/deception-audio-datasets/lie_audio_edited"
):
    """
    developed by tybruno

    This function was created to train the deception model using .wav files. Only .wav files are supported.

    :param truth_audio_path: path to the truth audio (.wav) datasets
    :param lie_audio_path:  path to the lie audio (.wav) datasets
    :return:
    """

    #train model
    aT.featureAndTrain(
        list_of_dirs=[truth_audio_path, lie_audio_path],
        mt_win=1,
        mt_step=1,
        st_win=aT.shortTermWindow,
        st_step=aT.shortTermStep,
        classifier_type="svm",
        model_name="deceptionSvm_edited",
        compute_beat=False,
    )
    aT.featureAndTrain(
        list_of_dirs=[truth_audio_path, lie_audio_path],
        mt_win=1,
        mt_step=1,
        st_win=aT.shortTermWindow,
        st_step=aT.shortTermStep,
        classifier_type="knn",
        model_name="deceptionKNN_edited",
        compute_beat=False,
    )
    aT.featureAndTrain(
        list_of_dirs=[truth_audio_path, lie_audio_path],
        mt_win=1,
        mt_step=1,
        st_win=aT.shortTermWindow,
        st_step=aT.shortTermStep,
        classifier_type="randomforest",
        model_name="deceptionRandomForest_edited",
        compute_beat=False,
    )
    aT.featureAndTrain(
        list_of_dirs=[truth_audio_path, lie_audio_path],
        mt_win=1,
        mt_step=1,
        st_win=aT.shortTermWindow,
        st_step=aT.shortTermStep,
        classifier_type="gradientboosting",
        model_name="deceptionGradientBoosting_edited",
        compute_beat=False,
    )
    aT.featureAndTrain(
        list_of_dirs=[truth_audio_path, lie_audio_path],
        mt_win=1,
        mt_step=1,
        st_win=aT.shortTermWindow,
        st_step=aT.shortTermStep,
        classifier_type="extratrees",
        model_name="deceptionExtraTrees_edited",
        compute_beat=False,
    )
Example #16
import sys
import os
import numpy
import shutil

from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain(["Python/pyAudioAnalysis-master/pyAudioAnalysis/classifierData/noise","Python/pyAudioAnalysis-master/pyAudioAnalysis/classifierData/speech","Python/pyAudioAnalysis-master/pyAudioAnalysis/classifierData/nothing"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp", False)
Example #17
from pyAudioAnalysis import audioTrainTest as aT
#aT.featureAndTrain(["/home/tyiannak/Desktop/MusicGenre/Classical/","/home/tyiannak/Desktop/MusicGenre/Electronic/","/home/tyiannak/Desktop/MusicGenre/Jazz/"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmMusicGenre3", True)
aT.featureAndTrain(
    ["pyAudioAnalysis/data/bones_music/", "pyAudioAnalysis/data/speech_vids/"],
    1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmBones", False)
print(aT.fileClassification(
    "Intro_obtained_from_longest_common_subsequence.wav", "svmBones", "svm"))
Example #18
from pyAudioAnalysis import audioTrainTest as aT

aT.featureAndTrain([
    "./categories/tecla_0", "./categories/tecla_1", "./categories/tecla_2",
    "./categories/tecla_3", "./categories/tecla_4", "./categories/tecla_5",
    "./categories/tecla_6", "./categories/tecla_7", "./categories/tecla_8",
    "./categories/tecla_9"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmDTMF", False)

aT.featureAndTrain([
    "./categories/tecla_0", "./categories/tecla_1", "./categories/tecla_2",
    "./categories/tecla_3", "./categories/tecla_4", "./categories/tecla_5",
    "./categories/tecla_6", "./categories/tecla_7", "./categories/tecla_8",
    "./categories/tecla_9"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "knnDTMF", False)
Example #19
# -*- coding: utf-8 -*-
"""
Spyder Editor

"""
from pyAudioAnalysis import audioTrainTest as aT
import os

dirname = '/home/hitesh/audioClassificationNew/trainingData'

subdirectories = os.listdir(dirname)[:10]

#subdirectories.pop(0)

subdirectories = [dirname + "/" + subDirName for subDirName in subdirectories]

print(subdirectories)
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "newModel", False)


'''
featureAndTrain(list of directories, mid-term window size, mid-term window step,
                short-term window size, short-term window step,
                classifier type, model file name to save, compute_beat flag)
'''
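The positional signature above is easy to misread. Newer pyAudioAnalysis releases also accept keyword arguments (Example #15 uses them), so the same training call can be written explicitly; a sketch assuming the newer parameter names (older releases used listOfDirs/mtWin/stWin, as in Example #1):

# Same training call with explicit keyword arguments (newer API names).
aT.featureAndTrain(list_of_dirs=subdirectories,
                   mt_win=1.0, mt_step=1.0,
                   st_win=aT.shortTermWindow, st_step=aT.shortTermStep,
                   classifier_type="svm",
                   model_name="newModel",
                   compute_beat=False)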
Example #20
from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain(["classifierData/snake","classifierData/Cheetah","classifierData/frog","classifierData/Fox","classifierData/seal","classifierData/panther","classifierData/tiger"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp", False)
aT.fileClassification("rattlesnake.wav", "svmSMtemp","svm")
Example #21
#!/usr/local/bin/python2
from pyAudioAnalysis import audioTrainTest as aT
import os
from sys import argv

script, dirname = argv

subdirectories = os.listdir(dirname)[:5]
subdirectories.pop(0)

subdirectories = [dirname + "/" + subDirName for subDirName in subdirectories]

print(subdirectories)
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow,
                   aT.shortTermStep, "svm", "svmModel", False)
Example #22
from pyAudioAnalysis import audioTrainTest as aT

aT.featureAndTrain(["./categories/tecla_0","./categories/tecla_1","./categories/tecla_2","./categories/tecla_3","./categories/tecla_4","./categories/tecla_5","./categories/tecla_6","./categories/tecla_7","./categories/tecla_8","./categories/tecla_9"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmDTMF", False)

aT.featureAndTrain(["./categories/tecla_0","./categories/tecla_1","./categories/tecla_2","./categories/tecla_3","./categories/tecla_4","./categories/tecla_5","./categories/tecla_6","./categories/tecla_7","./categories/tecla_8","./categories/tecla_9"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "knnDTMF", False)
Example #23
from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain(["/var/www/html/data4/train/Artifact","/var/www/html/data4/train/Murmur","/var/www/html/data4/train/Normal"],0.125, 0.0625, aT.shortTermWindow, aT.shortTermStep, "svm", "/var/www/html/knnAll")

Example #24
subdirectories = os.listdir(dirname)[:14]

print(subdirectories)

subdirectories = [dirname + "/" + subDirName for subDirName in subdirectories]
print(subdirectories)
# pyAudioAnalysis has 5 classifiers:
#   1) SVM
#   2) KNN
#   3) Random Forest
#   4) Gradient Boosting
#   5) Extra Trees

#SVM classifier used:
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmModel", False)

#KNN classifier used
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "knnModel", False)

#Extra Trees Model used
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", "extratreesModel", False)

#Gradient Boosting classifier used
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", "gradientboostingModel", False)

#RandomForest classifier used
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "randomforest", "randomforestModel", False)


Example #25
print("\n\n\n * * * TEST 2 * * * \n\n\n")
[Fs, x] = audioBasicIO.read_audio_file(root_data_path +
                                       "pyAudioAnalysis/data/doremi.wav")
x = audioBasicIO.stereo_to_mono(x)
specgram, TimeAxis, FreqAxis = audioFeatureExtraction.stSpectogram(
    x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)

print("\n\n\n * * * TEST 3 * * * \n\n\n")
[Fs, x] = audioBasicIO.read_audio_file(root_data_path +
                                       "pyAudioAnalysis/data/doremi.wav")
x = audioBasicIO.stereo_to_mono(x)
specgram, TimeAxis, FreqAxis = audioFeatureExtraction.stChromagram(
    x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)

print("\n\n\n * * * TEST 4 * * * \n\n\n")
aT.featureAndTrain([root_data_path + "SM/speech", root_data_path + "SM/music"],
                   1.0, 1.0, 0.2, 0.2, "svm", "temp", True)

print("\n\n\n * * * TEST 5 * * * \n\n\n")
[flagsInd, classesAll, acc, CM] = aS.mtFileClassification(
    root_data_path + "pyAudioAnalysis/data//scottish.wav",
    root_data_path + "pyAudioAnalysis/data/svmSM", "svm", True,
    root_data_path + 'pyAudioAnalysis/data/scottish.segments')

print("\n\n\n * * * TEST 6 * * * \n\n\n")
aS.trainHMM_fromFile(root_data_path + 'radioFinal/train/bbc4A.wav',
                     root_data_path + 'radioFinal/train/bbc4A.segments',
                     'hmmTemp1', 1.0, 1.0)
aS.trainHMM_fromDir(root_data_path + 'radioFinal/small', 'hmmTemp2', 1.0, 1.0)
aS.hmmSegmentation(root_data_path + 'pyAudioAnalysis/data//scottish.wav',
                   'hmmTemp1', True, root_data_path +
                   'pyAudioAnalysis/data//scottish.segments')  # test 1
Example #26
    os.system('mv ./data_interviewers/511_interviewer/'+i+' ./data_interviewers/511_interviewer_train/')
for i in wavs_511_interviewer[half:]:
    os.system('mv ./data_interviewers/511_interviewer/'+i+' ./data_interviewers/511_interviewer_test/')


wavs_108_subset = os.listdir('./data_interviewers/108_subset/')
random.shuffle(wavs_108_subset)
half = int(len(wavs_108_subset) / 2)
for i in wavs_108_subset[:half]:
    os.system('mv ./data_interviewers/108_subset/'+i+' ./data_interviewers/108_subset_train/')
for i in wavs_108_subset[half:]:
    os.system('mv ./data_interviewers/108_subset/'+i+' ./data_interviewers/108_subset_test/')

# Feature extraction
from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain(["./data_interviewers/511_interviewer_train/", "./data_interviewers/108_subset_train/"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp", False)

results = []
test_dir = './data_interviewers/108_subset_test/'
for interviewer_sample in os.listdir(test_dir):
    results.append(aT.fileClassification(test_dir+interviewer_sample, "svmSMtemp","svm"))

y_pred = [n[0] for n in results]
import collections
counter = collections.Counter(y_pred)
print(counter)


# Result:
# (0.0, array([ 0.90156761,  0.09843239]), ['music', 'speech'])
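The printed tuple shows the structure fileClassification returns: the winning class index, the per-class probabilities, and the class names. A minimal sketch unpacking the results collected above:

# Unpack each stored result tuple (same structure as printed above).
for class_id, probs, class_names in results:
    print(class_names[int(class_id)], "p =", probs[int(class_id)])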
Example #27
import shutil
try:
    shutil.rmtree('sciezki', ignore_errors=True)
except:
    pass
try:
    os.mkdir("sciezki")
except:
    pass
client = Client('e-mail', 'password')
q = queue.Queue()
ficzery = [
    "cisza", "alarm", "rakietybaza", "rakietydaleko", "tlumik", "ak", "c4",
    "c4dalej", "statek", "helka"
]
aT.featureAndTrain(ficzery, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep,
                   "svm", "svmSMtemp", False)


def callback(indata, frames, time, status):
    """This is called (from a separate thread) for each audio block."""
    if status:
        print(status, file=sys.stderr)
    q.put(indata.copy())


device_info = sd.query_devices(2, 'input')
samplerate = int(device_info['default_samplerate'])
i = 0
try:
    while (True):
        with sf.SoundFile("sciezki/output{}.wav".format(i),
Example #28
from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain(["classifierData/snake","classifierData/tiger"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp", False)
aT.fileClassification("snakehit.wav", "svmSMtemp","svm")
Example #30
from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain([
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/my-recordings/highlights",
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/my-recordings/background"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svmSMtemp", False)
aT.fileClassification(
    "/home/brandonjabr/pyAudio/pyAudioAnalysis/my-recordings/highlights/out-02.wav",
    "svmSMtemp", "svm")
Example #31
def main():
    # def train():
    input('Speak for 5 secs after pressing \'Enter\': ')
    print('\nRecording')

    time.sleep(.5)

    frames = []

    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)

    print('\nRecording Saved.')
    stream.stop_stream()
    stream.close()
    p.terminate()

    wf = wave.open('sounds/' + 'output%d.wav' % FILE_NUMBER, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()

    # >>>>>>FEATURE EXTRACTION
    [fs, x] = audioBasicIO.readAudioFile('sounds/output%d.wav' % FILE_NUMBER)
    f, f_names = ShortTermFeatures.feature_extraction(x, fs, 0.050 * fs,
                                                      0.025 * fs)
    print(f_names)
    print(f)

    # def trainClassifier():
    # >>>>>TRAINING SVM
    aT.featureAndTrain([
        "Male/",
        "Female/",
    ], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "svm2Classes")

    aT.fileClassification('sounds/output1.wav', "svm2Classes", "svm")

    # def playAudio ():
    # Play audio
    input('To play audio press \'Enter\': ')
    filename = 'sounds/output1.wav'
    wave_obj = sa.WaveObject.from_wave_file(filename)
    play_obj = wave_obj.play()
    play_obj.wait_done()  # Wait until sound has finished playing
    print("Audio has finished playing")

    # def manipulate():
    [fs, x] = audioBasicIO.readAudioFile('sounds/output%d.wav' % FILE_NUMBER)
    f, f_names = ShortTermFeatures.feature_extraction(x, fs, 0.050 * fs,
                                                      0.025 * fs)
    input('To manipulate input press \'Enter\': ')
    # Create an array of random numbers to use as the adversarial input
    r = np.random.rand(68, 198)
    print("Adversarial input\n", r)

    # Create an empty array to allow the user to edit any feature they want.
    s = (68, 198)
    e = np.zeros(s)
    print("Empty data\n", e)

    # Print the feature values for the original audio clip
    print("Audio clip\n", f)

    # Multiply the original audio with manipulated data to see if it can misclassify
    m = f * r

    print("Manipulated data\n", m)

    # def plotGraphs ():
    # Plotting original input
    plt.subplot(2, 2, 1)
    plt.plot(f[0, :])
    plt.xlabel('Original')
    plt.ylabel(f_names[0])

    # Plotting adversarial input
    plt.subplot(2, 2, 2)
    plt.plot(r[0, :])
    plt.xlabel('Adversarial input')

    # Plotting manipulated data
    plt.subplot(2, 2, 3)
    plt.plot(m[0, :])
    plt.xlabel('manipulated data')
    plt.show()

    # Convert manipulated array back into wav
    librosa.feature.inverse.mfcc_to_audio(m,
                                          n_mels=128,
                                          dct_type=2,
                                          norm='ortho',
                                          ref=1.0,
                                          lifter=0)
Example #32
import numpy as np
import scipy as sp
import pydub
import matplotlib.pyplot as plt
from pyAudioAnalysis import audioTrainTest as aT

# In[2]:

# Reading the dataset with .wav files
#sr, x = scipy.io.wavfile.read('/home/sarvpsin/Desktop/pyAudioAnalysis/pyAudioAnalysis/Data_mic/gun_shot_wav/102305.wav ')

# In[8]:

aT.featureAndTrain([
    "/home/sarvpsin/Desktop/pyAudioAnalysis/pyAudioAnalysis/Data_mic/gun_shot_wav",
    "/home/sarvpsin/Desktop/pyAudioAnalysis/pyAudioAnalysis/Data_mic/car_horn_wav"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", "SVMTry", False)

# In[12]:

#train test split here

aT.fileClassification(
    "/home/sarvpsin/Desktop/pyAudioAnalysis/pyAudioAnalysis/Data_mic/Handgun_sound_effect_1-youtube-NWezpZms1VA-140-192.wav",
    "SVMTry", "svm")

# In[6]:

# knn
#accuracy and F1_score
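The cell ends before the knn run and the accuracy/F1 computation it announces. A minimal sketch under the same directory layout; the held-out lists test_files and test_labels are hypothetical placeholders, not part of the original:

# Hedged sketch: train a knn model on the same two classes, then score
# held-out data with scikit-learn.
from sklearn.metrics import accuracy_score, f1_score

aT.featureAndTrain([
    "/home/sarvpsin/Desktop/pyAudioAnalysis/pyAudioAnalysis/Data_mic/gun_shot_wav",
    "/home/sarvpsin/Desktop/pyAudioAnalysis/pyAudioAnalysis/Data_mic/car_horn_wav"
], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "KNNTry", False)

# test_files (paths) and test_labels (0 = gun shot, 1 = car horn) are assumed.
y_pred = [int(aT.fileClassification(f, "KNNTry", "knn")[0]) for f in test_files]
print("accuracy:", accuracy_score(test_labels, y_pred))
print("F1:", f1_score(test_labels, y_pred))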
Example #33
from pyAudioAnalysis import audioTrainTest as aT
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("root", help="directory with training examples per bird",
                    type=str)
args = parser.parse_args()

birds = ['Trainings_b151', 'Trainings_b174', 'Trainings_b179', 'Trainings_DB12', 'Trainings_DB20', 'Trainings_DB30', 'Trainings_b73', 'Trainings_DB4']

'''
EG: aT.featureAndTrain(["Fragments/Shuffle/","Fragments/Silence/","Fragments/Micbeep/", "Fragments/Backgroundbird1/"],
    1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "manxknn")
'''
if __name__ == "__main__":
    root = args.root

    for bird in birds:
        bird_birds_dir = os.listdir(os.path.join(root, bird))
        sound_fragmentdir_list = [os.path.join(root, bird, sound_dir)
                for sound_dir in bird_birds_dir
                if len(os.listdir(os.path.join(root, bird, sound_dir))) > 1]
        aT.featureAndTrain(sound_fragmentdir_list, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "knnmodel" + bird)
Example #34
import sys
import os
SCRIPT_DIR = os.path.dirname(__file__)
sys.path.append(SCRIPT_DIR + "/../pyAudioAnalysis")
from pyAudioAnalysis import audioTrainTest as aT
from sys import argv

dirname = SCRIPT_DIR + "/../data/training/"
script, model_type = argv
model_filename = SCRIPT_DIR + "/../data/" + model_type

entries = os.listdir(dirname)
print("all entries: ", entries)
subdirectories = [i for i in entries if os.path.isdir(dirname + i)]
print("dirs only: ", subdirectories)

subdirectories = [dirname + subDirName for subDirName in subdirectories]

print("final dirs: ", subdirectories)
aT.featureAndTrain(subdirectories, 1.0, 1.0, aT.shortTermWindow,
                   aT.shortTermStep, model_type, model_filename, False)
Example #35
import os

from pyAudioAnalysis import audioTrainTest as aT

from python_scripts import tools
import time

MODEL_TYPES = [
    'knn', 'svm', 'svm_rbf', 'randomforest', 'gradientboosting', 'extratrees'
]

EMOTION_LIST = tools.MAP_EMOTION.values()

input_dir = 'output_by_emotion'
output_dir = 'Models'
model_name = 'Emotion7'
model_type = MODEL_TYPES[3]
output_name = model_type + model_name

path_base = os.path.join(os.path.dirname(os.getcwd()), input_dir)
dir_list = [os.path.join(path_base, emo) for emo in EMOTION_LIST]

begin = time.time()
aT.featureAndTrain(dir_list, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep,
                   model_type, os.path.join(output_dir, output_name))

end = time.time()
print("Duration : {}".format(end - begin))
Example #36
from pyAudioAnalysis import audioTrainTest as aT

myList = ['Austin/a','Austin/b','Austin/c','Austin/d','Austin/e','Austin/f','Austin/g','Austin/h','Austin/i','Austin/j','Austin/k','Austin/l','Austin/m','Austin/have','Austin/in','Austin/that','Austin/the',"Austin/toNew",'Austin/u','Austin/n']
aT.featureAndTrain(myList, 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, 'svm', 'svm', False)
def classify(classifier,myFile):
    [result, p, classNames] = aT.fileClassification(myFile,classifier,classifier)
    print(result)
    print(p)
    print(classNames)
    maxI = maxIndex(p)
    return classNames[maxI]
    #print aT.fileClassification(myFile,classifier,classifier)

    # print aT.fileClassification("Austin/a1.wav", classifier, classifier)
    # print aT.fileClassification("Austin/f1.wav", classifier,classifier)
    # print aT.fileClassification("Austin/h1.wav", classifier,classifier)
    # print aT.fileClassification("Austin/u1.wav", classifier,classifier)
    # print aT.fileClassification("Austin/c1.wav", classifier,classifier)
def maxIndex(a):
    i = 0
    maxInd = 0
    maxProb = 0
    for element in a:
        if element > maxProb:
            maxInd = i
            maxProb = element
        i = i + 1
    return maxInd
def classifyFile(myFile):
    print(aT.fileClassification(myFile, 'svm', 'svm'))
Example #37
def parseArguments():
    parser = argparse.ArgumentParser(prog='PROG')
    parser.add_argument(
        '-d',
        '--data_folder',
        nargs=None,
        default=
        "/Users/tyiannak/ResearchData/Audio Dataset/pyAudioAnalysisData/")
    parser.add_argument('-c',
                        '--classifier_type',
                        nargs=None,
                        required=True,
                        choices=[
                            "knn", "svm", "svm_rbf", "randomforest",
                            "extratrees", "gradientboosting"
                        ],
                        help="Classifier type")
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parseArguments()
    root_data_path = args.data_folder
    classifier_type = args.classifier_type
    classifier_path = "sm_" + classifier_type
    aT.featureAndTrain(
        [root_data_path + "SM/speech", root_data_path + "SM/music"], 1.0, 1.0,
        0.2, 0.2, classifier_type, classifier_path, False)
Example #38
from __future__ import print_function
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioSegmentation as aS
import matplotlib.pyplot as plt
import argparse

def parseArguments():
    parser = argparse.ArgumentParser(prog='PROG')
    parser.add_argument('-d' , '--data_folder', nargs=None, default="/Users/tyiannak/ResearchData/Audio Dataset/pyAudioAnalysisData/")
    parser.add_argument('-c' , '--classifier_type', nargs=None, required=True, 
                        choices = ["knn", "svm", "svm_rbf", "randomforest", "extratrees", "gradientboosting"],
                        help="Classifier type")
    args = parser.parse_args()        
    return args



if __name__ == '__main__':
    args = parseArguments()
    root_data_path = args.data_folder
    classifier_type = args.classifier_type
    classifier_path = "sm_" + classifier_type
    aT.featureAndTrain([root_data_path +"SM/speech",root_data_path + "SM/music"], 
                       1.0, 1.0, 0.2, 0.2, classifier_type, 
                       classifier_path, False)
Example #39
#from scipy.io.wavfile import read
#import numpy as np

#a = read("testbarks.wav")
#x = np.array(a[1])
#w = np.fft.fft(x)
#freqs = np.fft.fftfreq(len(w))
#print(w)
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')

from pyAudioAnalysis import audioTrainTest as aT
aT.featureAndTrain(["/home/Project/train/Dog","/home/Project/train/Man","/home/Project/train/Clatter","/home/Project/train/Bang", "/home/Project/train/Lownoise"], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", "testDogy", False)
#Clatter short
#test = aT.fileClassification("/home/Project/train/Clatter/clatter3.wav", "testDogx","knn")
#print("Expected: Clatter")
#print(test)
#Loud door knock
#test = aT.fileClassification("/home/Project/train/Clatter/2016-03-13_1341393.wav", "testDogx","knn")
#print("Expected: Clatter")
#print(test)
#Dropping on hard floor
#test = aT.fileClassification("/home/Project/train/Clatter/2016-01-14_1013132.wav", "testDogx","knn")
#print("Expected: Clatter")
#print(test)
#Bang, short thud
#test = aT.fileClassification("/home/Project/train/Bang/2016-01-13_0909361.wav", "testDogx","knn")
#print("Expected: Bang")
#print(test)
#Bang, Watery hit 
Example #40
F, f_names = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.025*Fs);
plt.subplot(2,1,1); plt.plot(F[0,:]); plt.xlabel('Frame no'); plt.ylabel(f_names[0]);
plt.subplot(2,1,2); plt.plot(F[1,:]); plt.xlabel('Frame no'); plt.ylabel(f_names[1]); plt.show()

print("\n\n\n * * * TEST 2 * * * \n\n\n")
[Fs, x] = audioBasicIO.readAudioFile(root_data_path + "pyAudioAnalysis/data/doremi.wav")
x = audioBasicIO.stereo2mono(x)
specgram, TimeAxis, FreqAxis = audioFeatureExtraction.stSpectogram(x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)

print("\n\n\n * * * TEST 3 * * * \n\n\n")
[Fs, x] = audioBasicIO.readAudioFile(root_data_path + "pyAudioAnalysis/data/doremi.wav")
x = audioBasicIO.stereo2mono(x)
specgram, TimeAxis, FreqAxis = audioFeatureExtraction.stChromagram(x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)

print("\n\n\n * * * TEST 4 * * * \n\n\n")
aT.featureAndTrain([root_data_path +"SM/speech",root_data_path + "SM/music"], 1.0, 1.0, 0.2, 0.2, "svm", "temp", True)

print("\n\n\n * * * TEST 5 * * * \n\n\n")
[flagsInd, classesAll, acc, CM] = aS.mtFileClassification(root_data_path + "pyAudioAnalysis/data//scottish.wav", root_data_path + "pyAudioAnalysis/data/svmSM", "svm", True, root_data_path + 'pyAudioAnalysis/data/scottish.segments')

print("\n\n\n * * * TEST 6 * * * \n\n\n")
aS.trainHMM_fromFile(root_data_path + 'radioFinal/train/bbc4A.wav', root_data_path + 'radioFinal/train/bbc4A.segments', 'hmmTemp1', 1.0, 1.0)	
aS.trainHMM_fromDir(root_data_path + 'radioFinal/small', 'hmmTemp2', 1.0, 1.0)
aS.hmmSegmentation(root_data_path + 'pyAudioAnalysis/data//scottish.wav', 'hmmTemp1', True, root_data_path + 'pyAudioAnalysis/data//scottish.segments')				# test 1
aS.hmmSegmentation(root_data_path + 'pyAudioAnalysis/data//scottish.wav', 'hmmTemp2', True, root_data_path + 'pyAudioAnalysis/data//scottish.segments')				# test 2

print("\n\n\n * * * TEST 7 * * * \n\n\n")
aT.featureAndTrainRegression(root_data_path + "pyAudioAnalysis/data/speechEmotion", 1, 1, 0.050, 0.050, "svm_rbf", "temp.mod", compute_beat=False)
print(aT.fileRegression(root_data_path + "pyAudioAnalysis/data/speechEmotion/01.wav", "temp.mod", "svm_rbf"))

print("\n\n\n * * * TEST 8 * * * \n\n\n")