def train(train_path):
    data_folders = [
        os.path.join(train_path, args.classcode + str(i))
        for i in range(class_count)
    ]
    aT.extract_features_and_train(data_folders, 1.0, 1.0,
                                  aT.shortTermWindow, aT.shortTermStep,
                                  args.algorithm, model_name, False)
def train_fold(fold_no):
    data_folders = [
        dataset_path + "/train" + str(fold_no) + "/" + args.classcode + str(i)
        for i in range(class_count)
    ]
    aT.extract_features_and_train(data_folders, 1.0, 1.0,
                                  aT.shortTermWindow, aT.shortTermStep,
                                  args.algorithm, model_name, False)
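# Both helpers above rely on module-level state; a minimal sketch of the
# surrounding setup (every value here is a hypothetical placeholder):
#
#   import os
#   from pyAudioAnalysis import audioTrainTest as aT
#
#   class_count = 4                  # number of class folders
#   model_name = "models/svm_demo"   # output model path
#   dataset_path = "dataset"         # root of the per-fold folders
#   # args is an argparse namespace providing args.classcode (folder
#   # name prefix, e.g. "class") and args.algorithm (e.g. "svm")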
def trainClassifierWrapper(method, beat_feats, directories, model_name):
    if len(directories) < 2:
        raise Exception("At least 2 directories are needed")
    aT.extract_features_and_train(directories, 1, 1,
                                  aT.shortTermWindow, aT.shortTermStep,
                                  method.lower(), model_name,
                                  compute_beat=beat_feats)
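# A minimal usage sketch for the wrapper above; the directory names and
# model name are hypothetical placeholders:
trainClassifierWrapper("svm", False, ["data/speech", "data/music"],
                       "svm_speech_music")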
def train_segment_classifier_and_create_model(music_parent_directory, model_name):
    # List the class subdirectories, skipping macOS '.DS_Store' entries
    subdirectories = [d for d in os.listdir(music_parent_directory)
                      if d != '.DS_Store']
    subdirectories = [music_parent_directory + '/' + subdirectory
                      for subdirectory in subdirectories]
    aT.extract_features_and_train(subdirectories, 1.0, 1.0,
                                  aT.shortTermWindow, aT.shortTermStep,
                                  "randomforest", 'data/' + model_name, True)
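# Applying a model produced by the function above to a single clip;
# a sketch, with hypothetical model and file names:
class_id, probabilities, classes = aT.file_classification(
    "segments/test_clip.wav", "data/genre_model", "randomforest")
print(classes[int(class_id)], probabilities)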
best_polynomial_model = SVC(**polyKernel_param)
best_polynomial_model.fit(X_train, y_train)

plt.figure()
plt.title("Polynomial Kernel SVC decision boundary", fontsize=16)
plot_decision_boundaries(X_train,
                         y_train.apply(lambda x: 1 if x == 'cough' else 0),
                         SVC, **polyKernel_param)
plt.show()

### Storing classifier
import joblib
joblib.dump(best_gaussian_model, 'SVM_cough_classifier.joblib')

''' Train a classifier with pyAudioAnalysis '''
from pyAudioAnalysis import audioTrainTest as aT

help(aT.extract_features_and_train)

cough_path = 'C:/Users/Guillem/Desktop/HACKATHON 2020/Unlabeled audio/TRAIN/Cough/'
nocough_path = 'C:/Users/Guillem/Desktop/HACKATHON 2020/Unlabeled audio/TRAIN/No_Cough/'

svm_linear = aT.extract_features_and_train([cough_path, nocough_path],
                                           0.2, 0.2,
                                           aT.shortTermWindow, aT.shortTermStep,
                                           "svm", "svm_linear", False,
                                           train_percentage=0.80)
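# The stored scikit-learn classifier can be reloaded later; a minimal
# sketch (it assumes new samples receive the same preprocessing as
# X_train):
clf = joblib.load('SVM_cough_classifier.joblib')
print(clf.predict(X_train[:5]))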
def parse_arguments():
    """Parse command-line arguments for the classifier training demo."""
    parser = argparse.ArgumentParser(description="Train audio classifiers")
    parser.add_argument("-i", "--input_audio", required=True, nargs='+',
                        help="List of audio paths")
    parser.add_argument("-o", "--output_model", required=True, nargs=None,
                        help="Output model's path")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()
    input_audio = args.input_audio
    output_model = args.output_model
    mt_win = 3.0
    mt_step = 1.0
    # this is obviously not optimal in terms of performance, but it is quite fast
    st_win = st_step = 0.1
    aT.extract_features_and_train(input_audio, mt_win, mt_step, st_win, st_step,
                                  "svm_rbf", output_model, False)
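# A hypothetical invocation of the script above (the script name and
# paths are placeholders):
#   python train_model.py -i data/class1 data/class2 -o models/svm_rbf_demo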
"sm", "movie8", "speakers", "speaker-gender", "music-genre6", "4class" ], help="Classification task") args = parser.parse_args() return args if __name__ == '__main__': args = parseArguments() root_data_path = args.data_folder classifier_type = args.classifier_type if args.task == "sm": aT.extract_features_and_train( [root_data_path + "SM/speech", root_data_path + "SM/music"], 1.0, 1.0, 0.05, 0.05, classifier_type, classifier_type + "_sm", False) elif args.task == "movie8": aT.extract_features_and_train([ root_data_path + "movieSegments/8-class/Speech", root_data_path + "movieSegments/8-class/Music", root_data_path + "movieSegments/8-class/Others1", root_data_path + "movieSegments/8-class/Others2", root_data_path + "movieSegments/8-class/Others3", root_data_path + "movieSegments/8-class/Shots", root_data_path + "movieSegments/8-class/Fights", root_data_path + "movieSegments/8-class/Screams" ], 1.0, 1.0, 0.05, 0.05, classifier_type, classifier_type + "_movie8class", False) elif args.task == "speakers": aT.extract_features_and_train([
aT.extract_features_and_train([
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 1\\Agressive\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 1\\Boisterous\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 1\\Rowdy\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 1\\Volatile\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 2\\Confident\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 2\\Fiery\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 2\\Intense\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 2\\Passionate\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 2\\Rousing\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 3\\Autumnal\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 3\\Brooding\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 3\\Literate\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 4\\Bittersweet\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 4\\Poignant\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 4\\Wistful\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 5\\Campy\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 5\\whimsical\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 5\\Wry\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 6\\Cheerful\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 6\\Rollicking\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 7\\Fun\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 7\\Humorous\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 7\\Silly\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 7\\Witty\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 8\\Amiable-good natured\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 8\\Sweet\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 9\\Tense - Anxious\\",
    "C:\\Users\\asus\\Desktop\\MIREX\\dataset\\Audio\\Cluster 9\\Visceral\\",
], midTerm, midTerm / 4, shortTerm, shortTerm / 4,
    "extratrees", "ET_Vybhav_9-July", True)
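# The 28 hand-written mood directories above could also be collected from
# the "Cluster <n>/<mood>/" layout; a sketch that assumes the same tree:
import glob
mood_dirs = sorted(glob.glob(
    "C:/Users/asus/Desktop/MIREX/dataset/Audio/Cluster */*/"))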
import glob
import os
import shutil

from pyAudioAnalysis import audioTrainTest as aT

# Move all files into a temporary directory
os.mkdir("tempTrain")
regions = glob.glob("train/*")
for region in regions:
    states = glob.glob(region + "/*")
    os.mkdir("tempTrain" + region[region.find("/"):])
    for state in states:
        paths = glob.glob(state + "/*")
        for path in paths:
            shutil.move(path, "tempTrain" + region[region.find("/"):])

# Get the list of directories to train on
dirs = ["tempTrain/" + directory for directory in os.listdir("tempTrain")]

# Train the model using a random forest
aT.extract_features_and_train(dirs, 1.0, 1.0, 0.1, 0.1,
                              "randomforest", "model", False)

# Move all files back to their original directories
regions = glob.glob("tempTrain/*")
for region in regions:
    paths = glob.glob(region + "/*")
    for path in paths:
        end = path.find(" ")
        if not path[end + 1:end + 2].isdigit():
            end = path[end + 1:].find(" ") + end + 1
        if end != -1:
            shutil.move(path, "train" + path[path.find("/"):end])

# Remove the now-empty temporary directory tree
regions = glob.glob("tempTrain/*")
for region in regions:
    os.rmdir(region)
os.rmdir("tempTrain")
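# A sketch of a collision-safe variant: tempfile.mkdtemp creates a fresh,
# uniquely named directory, avoiding the FileExistsError that
# os.mkdir("tempTrain") raises if a previous run was interrupted
# (assumption: the rest of the flow is unchanged):
import tempfile
temp_root = tempfile.mkdtemp(prefix="trainFeatures_")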
import os

data_dir = 'C:/MachineLearningPractice/emovo/EMOVOdata'
emotion_dirs = [x[0] for x in os.walk(data_dir)]
print(emotion_dirs[1:])

from pyAudioAnalysis import audioTrainTest as aT

aT.extract_features_and_train(emotion_dirs[1:], 1.0, 1.0,
                              aT.shortTermWindow, aT.shortTermStep,
                              "svm", "svmSMtemp", False,
                              train_percentage=0.80)
aT.file_classification(
    'C:/MachineLearningPractice/emovo/EMOVOdata/sadness/tri-f1-b1.wav',
    "svmSMtemp", "svm")
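# The return value of file_classification above is discarded; it holds
# the winning class index, the per-class probabilities, and the class
# names. A sketch of unpacking them:
class_id, probs, classes = aT.file_classification(
    'C:/MachineLearningPractice/emovo/EMOVOdata/sadness/tri-f1-b1.wav',
    "svmSMtemp", "svm")
print(classes[int(class_id)], probs)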
# Sample code for model training.
from pyAudioAnalysis import audioTrainTest as aT

class1_dir_name = './audio/pyaudio_train/male'
class2_dir_name = './audio/pyaudio_train/female'

# aT.extract_features_and_train([class1_dir_name, class2_dir_name], 1.0, 1.0,
#                               aT.shortTermWindow, aT.shortTermStep,
#                               'svm', 'svmMaleFemale', False)
# aT.extract_features_and_train([class1_dir_name, class2_dir_name], 1.0, 1.0,
#                               aT.shortTermWindow, aT.shortTermStep,
#                               'knn', 'knnMaleFemale', False)
# aT.extract_features_and_train([class1_dir_name, class2_dir_name], 1.0, 1.0,
#                               aT.shortTermWindow, aT.shortTermStep,
#                               'extratrees', 'extraTreesMaleFemale', False)
aT.extract_features_and_train([class1_dir_name, class2_dir_name], 1.0, 1.0,
                              aT.shortTermWindow, aT.shortTermStep,
                              'gradientboosting', 'gradientBoostMaleFemale',
                              False)
# aT.extract_features_and_train([class1_dir_name, class2_dir_name], 1.0, 1.0,
#                               aT.shortTermWindow, aT.shortTermStep,
#                               'randomforest', 'rfMaleFemale', False)
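# Instead of toggling comments, the same comparison could be run in a
# loop over classifier types (a sketch; model files land in the working
# directory):
for algo in ['svm', 'knn', 'extratrees', 'gradientboosting', 'randomforest']:
    aT.extract_features_and_train([class1_dir_name, class2_dir_name],
                                  1.0, 1.0,
                                  aT.shortTermWindow, aT.shortTermStep,
                                  algo, algo + 'MaleFemale', False)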
from __future__ import print_function
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import MidTermFeatures
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioSegmentation as aS
import matplotlib.pyplot as plt
import argparse


def parseArguments():
    parser = argparse.ArgumentParser(prog='PROG')
    parser.add_argument('-d', '--data_folder', nargs=None,
                        default="/Users/tyiannak/ResearchData/Audio Dataset/pyAudioAnalysisData/")
    parser.add_argument('-c', '--classifier_type', nargs=None, required=True,
                        choices=["knn", "svm", "svm_rbf", "randomforest",
                                 "extratrees", "gradientboosting"],
                        help="Classifier type")
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parseArguments()
    root_data_path = args.data_folder
    classifier_type = args.classifier_type
    classifier_path = "sm_" + classifier_type
    aT.extract_features_and_train([root_data_path + "SM/speech",
                                   root_data_path + "SM/music"],
                                  1.0, 1.0, 0.2, 0.2,
                                  classifier_type, classifier_path, False)
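    # The trained model can also drive fixed-window segmentation of a
    # longer recording through the audioSegmentation import above
    # (a sketch; the wav file name is a hypothetical placeholder):
    flags, classes, acc, cm = aS.mid_term_file_classification(
        root_data_path + "radio_show.wav", classifier_path, classifier_type)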
from pyAudioAnalysis import audioTrainTest as aT

midTerm = 3  # assumed value; midTerm is not defined in this excerpt
shortTerm = 0.0625
aT.extract_features_and_train([
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 1\\Agressive\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 1\\Boisterous\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 1\\Rowdy\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 1\\Volatile\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 2\\Confident\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 2\\Fiery\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 2\\Intense\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 2\\Passionate\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 2\\Rousing\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 3\\Autumnal\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 3\\Brooding\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 3\\Literate\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 4\\Bittersweet\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 4\\Poignant\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 4\\Wistful\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 5\\Campy\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 5\\whimsical\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 5\\Wry\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 6\\Cheerful\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 6\\Rollicking\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 7\\Fun\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 7\\Humorous\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 7\\Silly\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 7\\Witty\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 8\\Amiable-good natured\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 8\\Sweet\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 9\\Tense - Anxious\\",
    "D:\\Capstone\\MIREX-like_mood\\dataset\\Audio\\Cluster 9\\Visceral\\",
], midTerm, midTerm / 4, shortTerm, shortTerm / 4,
    "randomforest", "RF_test", True)
"""! @brief Example 31A @details: Train a speech-music classifier @author Theodoros Giannakopoulos {[email protected]} """ from pyAudioAnalysis.audioTrainTest import extract_features_and_train if __name__ == '__main__': mt = 1.0 st = 0.05 dir_paths = ["../data/speech_music/speech", "../data/speech_music/music"] extract_features_and_train(dir_paths, mt, mt, st, st, "svm_rbf", "svm_speech_music")
from pyAudioAnalysis import audioTrainTest as aT

midTerm = 3
shortTerm = 0.0625
aT.extract_features_and_train([
    "C:\\Users\\asus\\Desktop\\10sec\\Sad\\",
    "C:\\Users\\asus\\Desktop\\10sec\\Devotional\\",
    "C:\\Users\\asus\\Desktop\\10sec\\Happy\\",
    "C:\\Users\\asus\\Desktop\\10sec\\Party\\",
    "C:\\Users\\asus\\Desktop\\10sec\\Romantic\\",
], midTerm, midTerm / 4, shortTerm, shortTerm / 4,
    "extratrees", "ET_5M_15-July",
    compute_beat=True, train_percentage=1.0)

# Visual separator between runs
print(
    "******************************************************************************************************************************"
)
# Make sure to change the paths.
# Training will generate three files; don't delete them, as they will
# come in handy later.
# MAKE LOGS
from pyAudioAnalysis import audioTrainTest as aT

# For Vedansh
aT.extract_features_and_train([
    "C:/Users/asus/Desktop/GTZAN/pop/",
    "C:/Users/asus/Desktop/GTZAN/country/",
    "C:/Users/asus/Desktop/GTZAN/blues/",
    "C:/Users/asus/Desktop/GTZAN/disco/",
    "C:/Users/asus/Desktop/GTZAN/jazz/",
    "C:/Users/asus/Desktop/GTZAN/classical/",
    "C:/Users/asus/Desktop/GTZAN/hiphop/",
    "C:/Users/asus/Desktop/GTZAN/metal/",
    "C:/Users/asus/Desktop/GTZAN/reggae/",
    "C:/Users/asus/Desktop/GTZAN/rock/"
], 1.0, 0.5, aT.shortTermWindow, aT.shortTermStep,
    "svm", "SVM_Linear", True)

# For Arman
aT.extract_features_and_train([
    "C:/Users/asus/Desktop/GTZAN/pop/",
    "C:/Users/asus/Desktop/GTZAN/country/",
    "C:/Users/asus/Desktop/GTZAN/blues/",
    "C:/Users/asus/Desktop/GTZAN/disco/",
    "C:/Users/asus/Desktop/GTZAN/jazz/",
    "C:/Users/asus/Desktop/GTZAN/classical/",
    "C:/Users/asus/Desktop/GTZAN/hiphop/",
    "C:/Users/asus/Desktop/GTZAN/metal/",
    "C:/Users/asus/Desktop/GTZAN/reggae/",
    "C:/Users/asus/Desktop/GTZAN/rock/"
], 1.25, 0.6, aT.shortTermWindow, aT.shortTermStep,
    "knn", "knn_k=3", True)
# I'm gonna train you up, honey!
from pyAudioAnalysis import audioTrainTest as aT

class_dirs = [
    # "./wernicke_server_training/burp/",
    "./wernicke_server_training/dog/",
    "./wernicke_server_training/lover/",
    # "./wernicke_server_training/lyriq/",
    "./wernicke_server_training/other/",
    "./wernicke_server_training/roxy/",
    "./wernicke_server_training/sex/",
]
aT.extract_features_and_train(class_dirs, 1.0, 1.0,
                              aT.shortTermWindow, aT.shortTermStep,
                              "svm", "wernicke_server_model")
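# The stored bundle can be inspected with load_model; kept as a comment
# here because the exact return tuple varies across pyAudioAnalysis
# versions:
# clf, mean, std, classes, mt_win, mt_step, st_win, st_step, compute_beat = \
#     aT.load_model("wernicke_server_model")
# print(classes)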
from pyAudioAnalysis import audioTrainTest as aT
import numpy as np
import os

'''
subdirectories = [r'D:\Audio_Speech_Actors_01-24\Angry',
                  r'D:\Audio_Speech_Actors_01-24\Calm',
                  r'D:\Audio_Speech_Actors_01-24\Disgust',
                  r'D:\Audio_Speech_Actors_01-24\Fearful',
                  r'D:\Audio_Speech_Actors_01-24\Happy',
                  r'D:\Audio_Speech_Actors_01-24\Neutral',
                  r'D:\Audio_Speech_Actors_01-24\Sad',
                  r'D:\Audio_Speech_Actors_01-24\Surprised']
aT.extract_features_and_train(subdirectories, 1.0, 1.0, aT.shortTermWindow,
                              aT.shortTermStep, "svm", "svmModel", False)
aT.extract_features_and_train(subdirectories, 1.0, 1.0, aT.shortTermWindow,
                              aT.shortTermStep, "knn", "knnModel", False)
aT.extract_features_and_train(subdirectories, 1.0, 1.0, aT.shortTermWindow,
                              aT.shortTermStep, "randomforest",
                              "randomforestModel", False)
'''

Result, P, classNames = aT.file_classification(
from pyAudioAnalysis import audioTrainTest as aT

coyote_data_path = "/Users/2020shatgiskessell/Downloads/coyote_howl_dataset/macaulay_library_audio/split_audio_coyote"
not_coyote_data_path = "/Users/2020shatgiskessell/Downloads/coyote_howl_dataset/macaulay_library_audio/split_audio_bg"

aT.extract_features_and_train([coyote_data_path, not_coyote_data_path],
                              1.0, 1.0,
                              aT.shortTermWindow, aT.shortTermStep,
                              "svm", "svmSMtemp", False)

# import librosa
# import os
# from scipy.fftpack import fft
#
# failures = 0
# for filename in os.listdir("/Users/2020shatgiskessell/Downloads/coyote_howl_dataset/macaulay_library_audio/split_audio_coyote/"):
#     try:
#         x, sample_rate = librosa.load(filename, sr=None)
#     except FileNotFoundError:
#         pass
#
#     # train fft
#     # X = fft(x, n_fft)
#     # X_magnitude, X_phase = librosa.magphase(X)
#     # X_magnitude_db = librosa.amplitude_to_db(X_magnitude)
#
#     # duration = librosa.get_duration(y=clip, sr=sample_rate)
#
#     # clip = clip[:44,100*duration]
#
#     # perform short time fourier transformation (converts audio in time domain to frequency domain)
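# Note on the commented librosa loop above: os.listdir yields bare file
# names, so each needs to be joined with its directory before loading;
# a sketch:
# import os
# audio_dir = "/Users/2020shatgiskessell/Downloads/coyote_howl_dataset/macaulay_library_audio/split_audio_coyote/"
# for filename in os.listdir(audio_dir):
#     x, sample_rate = librosa.load(os.path.join(audio_dir, filename), sr=None)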
print("\n\n\n * * * TEST 2 * * * \n\n\n") [Fs, x] = audioBasicIO.read_audio_file(root_data_path + "pyAudioAnalysis/data/doremi.wav") x = audioBasicIO.stereo_to_mono(x) specgram, TimeAxis, FreqAxis = ShortTermFeatures.spectrogram( x, Fs, round(Fs * 0.040), round(Fs * 0.040), True) print("\n\n\n * * * TEST 3 * * * \n\n\n") [Fs, x] = audioBasicIO.read_audio_file(root_data_path + "pyAudioAnalysis/data/doremi.wav") x = audioBasicIO.stereo_to_mono(x) specgram, TimeAxis, FreqAxis = ShortTermFeatures.chromagram( x, Fs, round(Fs * 0.040), round(Fs * 0.040), True) print("\n\n\n * * * TEST 4 * * * \n\n\n") aT.extract_features_and_train([root_data_path + "1/", root_data_path + "2/"], 1.0, 1.0, 0.2, 0.2, "svm", "temp", True) print("\n\n\n * * * TEST 5 * * * \n\n\n") [flagsInd, classesAll, acc, CM] = aS.mid_term_file_classification( root_data_path + "scottish.wav", root_data_path + "models/svm_rbf_sm", "svm_rbf", True, root_data_path + 'pyAudioAnalysis/data/scottish.segments') print("\n\n\n * * * TEST 6 * * * \n\n\n") aS.train_hmm_from_file(root_data_path + 'radioFinal/train/bbc4A.wav', root_data_path + 'radioFinal/train/bbc4A.segments', 'hmmTemp1', 1.0, 1.0) aS.train_hmm_from_directory(root_data_path + 'radioFinal/small', 'hmmTemp2', 1.0, 1.0) aS.hmm_segmentation(root_data_path + 'pyAudioAnalysis/data//scottish.wav', 'hmmTemp1', True, root_data_path + 'pyAudioAnalysis/data//scottish.segments') # test 1
from pyAudioAnalysis import audioTrainTest as aT

# extract_features_and_train expects window/step sizes in seconds: here a
# 1 s mid-term window with a 0.5 s step, and a 50 ms / 25 ms short-term
# window/step. Note that classifier training needs at least two class
# folders; only one GTZAN genre folder is listed here.
paths = ['C:/Users/MADHUKAR/Desktop/Capstone/samples/GTZAN/blues']
mid_window = 1.0
mid_step = 0.5
short_window = 0.050
short_step = 0.025
classifier_type = "knn"
model_name = "knn_train"
aT.extract_features_and_train(paths, mid_window, mid_step,
                              short_window, short_step,
                              classifier_type, model_name,
                              compute_beat=False, train_percentage=0.90)
"""! @brief Example 20_roc @details General sound classification example, with focus on ROC diagrams @author Theodoros Giannakopoulos {[email protected]} """ from pyAudioAnalysis.audioTrainTest import extract_features_and_train from pyAudioAnalysis.audioTrainTest import evaluate_model_for_folders import os if __name__ == '__main__': dirs = [ "../data/general/train/animals", "../data/general/train/speech", "../data/general/train/objects", "../data/general/train/music" ] class_names = [os.path.basename(d) for d in dirs] mw, stw = 2, .1 extract_features_and_train(dirs, mw, mw, stw, stw, "svm_rbf", "svm_general_4") dirs_test = [ "../data/general/test/animals", "../data/general/test/speech", "../data/general/test/objects", "../data/general/test/music" ] evaluate_model_for_folders(dirs_test, "svm_general_4", "svm_rbf", "animals")
from pyAudioAnalysis import audioTrainTest as aT
from pathlib import Path

directory_in_str = "emotionData/Big4"
pathlist = Path(directory_in_str).glob('**/*.wav')

# Collect the unique parent directories of the wav files (one per class)
listOfDirs = []
for path in pathlist:
    parent = str(path.parents[0])
    if parent not in listOfDirs:
        listOfDirs.append(parent)

mtWin = 1.0
mtStep = 1.0
stWin = aT.shortTermWindow
stStep = aT.shortTermStep
classifierType = "extratrees"
modelName = "emotionModels/et_big4_1.0"
beat = True

aT.extract_features_and_train(listOfDirs, mtWin, mtStep, stWin, stStep,
                              classifierType, modelName, beat)
# Example 6: use the pyAudioAnalysis wrapper
# to extract features and train an SVM classifier
# on the male/female speech samples
from pyAudioAnalysis.audioTrainTest import extract_features_and_train

mt, st = 1.0, 0.05
dirs = ["data/male", "data/female"]
extract_features_and_train(dirs, mt, mt, st, st,
                           "svm_rbf", "models/svm_male_female")
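# The trained model could then be checked on held-out folders with the
# same helper used in Example 20_roc above (a sketch; the test paths and
# positive class are hypothetical):
from pyAudioAnalysis.audioTrainTest import evaluate_model_for_folders
evaluate_model_for_folders(["data/test/male", "data/test/female"],
                           "models/svm_male_female", "svm_rbf", "male")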