def Speech():
    estimators = get_best_estimators(True)
    estimators_str, estimator_dict = get_estimators_name(estimators)
    import argparse
    parser = argparse.ArgumentParser(description="""
        Testing emotion recognition system using your voice,
        please consider changing the model and/or parameters as you wish.
    """)
    parser.add_argument("-e", "--emotions",
                        help="""Emotions to recognize separated by a comma ',',
                        available emotions are "neutral", "calm", "happy", "sad", "angry",
                        "fear", "disgust", "ps" (pleasant surprise) and "boredom",
                        default is "sad,neutral,happy,angry,fear,disgust,ps" """,
                        default="sad,neutral,happy,angry,fear,disgust,ps")
    parser.add_argument("-m", "--model",
                        help="""The model to use, 8 models available are: {},
                        default is "BaggingClassifier" """.format(estimators_str),
                        default="BaggingClassifier")
    # parse the arguments passed
    args = parser.parse_args()
    features = ["mfcc", "chroma", "mel"]
    detector = EmotionRecognizer(estimator_dict[args.model],
                                 emotions=args.emotions.split(","),
                                 features=features,
                                 verbose=0)
    detector.train()
    print("Please talk")
    filename = "test.wav"
    # record audio from the microphone and save it to `filename`
    record_to_file(filename)
    # predict the emotion of the recorded audio
    result = detector.predict(filename)
    # detector.draw_confusion_matrix()
    return result, (detector.test_score() / 2) * 100
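# A minimal usage sketch (hypothetical, not part of the original source): wiring
# Speech() into a simple command-line check. The name `demo_speech` is an
# illustrative assumption.
def demo_speech():
    # records from the microphone via Speech() and prints the predicted emotion
    emotion, score = Speech()
    print(f"Detected emotion: {emotion} (score estimate: {score:.1f}%)")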
def plot_histograms(classifiers=True, beta=0.5, n_classes=3, verbose=1):
    """
    Loads different estimators from the `grid` folder and calculates some statistics
    to plot histograms.
    Params:
        classifiers (bool): if `True`, plots classifiers, regressors otherwise.
        beta (float): beta value for calculating the fbeta score of the various estimators.
        n_classes (int): number of classes.
        verbose (int): whether to print progress messages, default is 1.
    """
    # get the estimators from the performed grid search result
    estimators = get_best_estimators(classifiers)
    final_result = {}
    for estimator, params, cv_score in estimators:
        final_result[estimator.__class__.__name__] = []
        for i in range(3):
            result = {}
            # initialize the class
            detector = EmotionRecognizer(estimator, verbose=0)
            # load the data
            detector.load_data()
            if i == 0:
                # first, use 1% of the sample data
                sample_size = 0.01
            elif i == 1:
                # second, use 10% of the sample data
                sample_size = 0.1
            elif i == 2:
                # last, use all the data
                sample_size = 1
            # calculate the number of training and testing samples
            n_train_samples = int(len(detector.X_train) * sample_size)
            n_test_samples = int(len(detector.X_test) * sample_size)
            # set the data
            detector.X_train = detector.X_train[:n_train_samples]
            detector.X_test = detector.X_test[:n_test_samples]
            detector.y_train = detector.y_train[:n_train_samples]
            detector.y_test = detector.y_test[:n_test_samples]
            # calculate train time
            t_train = time()
            detector.train()
            t_train = time() - t_train
            # calculate test time
            t_test = time()
            test_accuracy = detector.test_score()
            t_test = time() - t_test
            # store the results in the dictionary
            result['train_time'] = t_train
            result['pred_time'] = t_test
            result['acc_train'] = cv_score
            result['acc_test'] = test_accuracy
            result['f_train'] = detector.train_fbeta_score(beta)
            result['f_test'] = detector.test_fbeta_score(beta)
            if verbose:
                print(f"[+] {estimator.__class__.__name__} with {sample_size*100}% ({n_train_samples}) data samples achieved {cv_score*100:.3f}% Validation Score in {t_train:.3f}s & {test_accuracy*100:.3f}% Test Score in {t_test:.3f}s")
            # append the dictionary to the list of results
            final_result[estimator.__class__.__name__].append(result)
        if verbose:
            print()
    visualize(final_result, n_classes=n_classes)
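# A usage sketch (hypothetical, not part of the original source): each entry of
# `final_result` passed to visualize() is a list of three dicts (1%, 10% and 100%
# of the data) keyed by train_time, pred_time, acc_train, acc_test, f_train, f_test.
def demo_plot_histograms():
    # compare the grid-searched classifiers with quieter output
    plot_histograms(classifiers=True, beta=0.5, n_classes=3, verbose=0)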
def get_best_estimators(self):
    """Loads estimators from grid files and returns them."""
    return get_best_estimators(self.classification)
#####################################################################################################
#                                         Emotion Analyzer                                          #
#####################################################################################################

def get_estimators_name(estimators):
    result = ['"{}"'.format(estimator.__class__.__name__) for estimator, _, _ in estimators]
    return ','.join(result), {
        estimator_name.strip('"'): estimator
        for estimator_name, (estimator, _, _) in zip(result, estimators)
    }

estimators = get_best_estimators(True)
estimators_str, estimator_dict = get_estimators_name(estimators)
features = ["mfcc", "chroma", "mel"]
detector = EmotionRecognizer(estimator_dict['BaggingClassifier'],
                             emotions='angry,happy,neutral'.split(","),
                             features=features,
                             verbose=0)
# load the pre-trained anger model from disk (the file handle is closed after loading)
with open('model_angry.pkl', 'rb') as modelObj:
    model_emotion = pickle.load(modelObj)

#####################################################################################################

def sentiment_scores(sentence):
    # Create a SentimentIntensityAnalyzer object.
    sid_obj = SentimentIntensityAnalyzer()