Example #1
def Speech():
    estimators = get_best_estimators(True)
    estimators_str, estimator_dict = get_estimators_name(estimators)
    import argparse
    parser = argparse.ArgumentParser(description="""
                                    Testing emotion recognition system using your voice, 
                                    please consider changing the model and/or parameters as you wish.
                                    """)
    parser.add_argument(
        "-e",
        "--emotions",
        help=
        """Emotions to recognize separated by a comma ',', available emotions are
                                            "neutral", "calm", "happy" "sad", "angry", "fear", "disgust", "ps" (pleasant surprise)
                                            and "boredom", default is "sad,neutral,happy"
                                            """,
        default="sad,neutral,happy,angry,fear,disgust,ps")
    parser.add_argument("-m",
                        "--model",
                        help="""
                                        The model to use, 8 models available are: {},
                                        default is "BaggingClassifier"
                                        """.format(estimators_str),
                        default="BaggingClassifier")

    # Parse the arguments passed
    args = parser.parse_args()

    features = ["mfcc", "chroma", "mel"]
    detector = EmotionRecognizer(estimator_dict[args.model],
                                 emotions=args.emotions.split(","),
                                 features=features,
                                 verbose=0)
    detector.train()
    print("Please talk")
    filename = "test.wav"

    # Record from the microphone, then predict the emotion of the recording;
    # the confusion-matrix call below is optional.
    record_to_file(filename)
    result = detector.predict(filename)
    #detector.draw_confusion_matrix()
    return result, (detector.test_score() / 2) * 100
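
# A minimal usage sketch, assuming this module is run directly and a microphone
# is available: Speech() returns the predicted emotion together with a (halved)
# test-accuracy percentage.
if __name__ == "__main__":
    emotion, score = Speech()
    print(f"Detected emotion: {emotion} ({score:.1f}%)")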
Example #2
import logging

from flask import Flask

matplotlib_logger = logging.getLogger('matplotlib')
matplotlib_logger.propagate = False

from emotion_recognition import EmotionRecognizer
from sklearn.svm import SVC
from deep_emotion_recognition import DeepEmotionRecognizer



# init a model, let's use SVC
my_model = SVC()
# pass my model to EmotionRecognizer instance
# and balance the dataset
simple_emotion = EmotionRecognizer(model=my_model, emotions=['sad', 'neutral',
                                                             'happy', 'fear'],
                                   balance=True, verbose=0)
# train the model
simple_emotion.train()

# initialize instance
# inherited from emotion_recognition.EmotionRecognizer
# default parameters (LSTM: 128x2, Dense:128x2)
deep_emotion = DeepEmotionRecognizer(emotions=['angry', 'sad', 'neutral', 'ps', 'happy'],
                                     n_rnn_layers=2, n_dense_layers=2,
                                     rnn_units=128, dense_units=128)
# train the model
deep_emotion.train()
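
# A hedged follow-up sketch: DeepEmotionRecognizer is described above as
# inheriting from emotion_recognition.EmotionRecognizer, so test_score() and
# predict() are assumed to be available after training (the wav path below is
# only illustrative).
print("Deep model test score:", deep_emotion.test_score())
# print(deep_emotion.predict("test.wav"))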



app = Flask(__name__)
Example #3
import pickle

from emotion_recognition import EmotionRecognizer
from parameters import classification_grid_parameters, regression_grid_parameters

emotions = ['sad', 'neutral', 'happy']

best_estimators = []

for model, params in classification_grid_parameters.items():
    if model.__class__.__name__ == "KNeighborsClassifier":
        # in case of a K-Nearest neighbors algorithm
        # set number of neighbors to the length of emotions
        params['n_neighbors'] = [len(emotions)]
    d = EmotionRecognizer(model, emotions=emotions)
    d.load_data()
    best_estimator, best_params, cv_best_score = d.grid_search(params=params)
    best_estimators.append((best_estimator, best_params, cv_best_score))
    print(
        f"{emotions} {best_estimator.__class__.__name__} achieved {cv_best_score:.3f} cross validation accuracy score!"
    )

print(f"[+] Pickling best classifiers for {emotions}...")
with open("grid/best_classifiers.pickle", "wb") as f:
    pickle.dump(best_estimators, f)
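
# The pickled list is presumably what the project's get_best_estimators()
# helper (used in the other examples) loads back later; a minimal sketch of
# reading it, assuming the same path:
#     with open("grid/best_classifiers.pickle", "rb") as f:
#         best_classifiers = pickle.load(f)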

best_estimators = []

for model, params in regression_grid_parameters.items():
    if model.__class__.__name__ == "KNeighborsRegressor":
        # in case of a K-Nearest neighbors algorithm
Example #4
def plot_histograms(classifiers=True, beta=0.5, n_classes=3, verbose=1):
    """
    Loads different estimators from `grid` folder and calculate some statistics to plot histograms.
    Params:
        classifiers (bool): if `True`, this will plot classifiers, regressors otherwise.
        beta (float): beta value for calculating fbeta score for various estimators.
        n_classes (int): number of classes
    """
    # get the estimators from the performed grid search result
    estimators = get_best_estimators(classifiers)

    final_result = {}
    for estimator, params, cv_score in estimators:
        final_result[estimator.__class__.__name__] = []
        for i in range(3):
            result = {}
            # initialize the class
            detector = EmotionRecognizer(estimator, verbose=0)
            # load the data
            detector.load_data()
            if i == 0:
                # first get 1% of sample data
                sample_size = 0.01
            elif i == 1:
                # second get 10% of sample data
                sample_size = 0.1
            elif i == 2:
                # last get all the data
                sample_size = 1
            # calculate number of training and testing samples
            n_train_samples = int(len(detector.X_train) * sample_size)
            n_test_samples = int(len(detector.X_test) * sample_size)
            # set the data
            detector.X_train = detector.X_train[:n_train_samples]
            detector.X_test = detector.X_test[:n_test_samples]
            detector.y_train = detector.y_train[:n_train_samples]
            detector.y_test = detector.y_test[:n_test_samples]
            # calculate train time
            t_train = time()
            detector.train()
            t_train = time() - t_train
            # calculate test time
            t_test = time()
            test_accuracy = detector.test_score()
            t_test = time() - t_test
            # set the result to the dictionary
            result['train_time'] = t_train
            result['pred_time'] = t_test
            result['acc_train'] = cv_score
            result['acc_test'] = test_accuracy
            result['f_train'] = detector.train_fbeta_score(beta)
            result['f_test'] = detector.test_fbeta_score(beta)
            if verbose:
                print(
                    f"[+] {estimator.__class__.__name__} with {sample_size*100}% ({n_train_samples}) data samples achieved {cv_score*100:.3f}% Validation Score in {t_train:.3f}s & {test_accuracy*100:.3f}% Test Score in {t_test:.3f}s"
                )
            # append the dictionary to the list of results
            final_result[estimator.__class__.__name__].append(result)
        if verbose:
            print()
    visualize(final_result, n_classes=n_classes)
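
# A minimal usage sketch, assuming the grid search results already exist under
# the `grid` folder (e.g. as produced by the grid-search example earlier):
if __name__ == "__main__":
    plot_histograms(classifiers=True, beta=0.5, n_classes=3, verbose=1)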
Example #5
    def determine_best_model(self, train=True):
        """
        Loads best estimators and determine which is best for test data,
        and then set it to `self.model`.
        if `train` is True, then train that model on train data, so the model
        will be ready for inference.
        In case of regression, the metric used is MSE and accuracy for classification.
        Note that the execution of this method may take several minutes due
        to training all estimators (stored in `grid` folder) for determining the best possible one.
        """
        if not self.data_loaded:
            self.load_data()

        # loads estimators
        estimators = self.get_best_estimators()

        result = []

        if self.verbose:
            estimators = tqdm.tqdm(estimators)

        for estimator, params, cv_score in estimators:
            if self.verbose:
                estimators.set_description(
                    f"Evaluating {estimator.__class__.__name__}")
            detector = EmotionRecognizer(estimator,
                                         emotions=self.emotions,
                                         tess_ravdess=self.tess_ravdess,
                                         emodb=self.emodb,
                                         custom_db=self.custom_db,
                                         classification=self.classification,
                                         features=self.features,
                                         balance=self.balance,
                                         override_csv=False)
            # data already loaded
            detector.X_train = self.X_train
            detector.X_test = self.X_test
            detector.y_train = self.y_train
            detector.y_test = self.y_test
            detector.data_loaded = True
            # train the model
            detector.train(verbose=0)
            # get test accuracy
            accuracy = detector.test_score()
            # append to result
            result.append((detector.model, accuracy))

        # sort the result
        if self.classification:
            result = sorted(result, key=lambda item: item[1], reverse=True)
        else:
            # for regression, lower error is better
            result = sorted(result, key=lambda item: item[1], reverse=False)
        best_estimator = result[0][0]
        accuracy = result[0][1]
        self.model = best_estimator
        self.model_trained = True
        if self.verbose:
            if self.classification:
                print(
                    f"[+] Best model determined: {self.model.__class__.__name__} with {accuracy*100:.3f}% test accuracy"
                )
            else:
                print(
                    f"[+] Best model determined: {self.model.__class__.__name__} with {accuracy:.5f} mean absolute error"
                )
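
    # A hedged usage sketch: construct a recognizer without passing a model and
    # let this method pick the best grid-searched estimator (this assumes a
    # model-less EmotionRecognizer is allowed, which this excerpt does not show):
    #   rec = EmotionRecognizer(emotions=["sad", "neutral", "happy"], verbose=1)
    #   rec.determine_best_model()
    #   print(rec.model.__class__.__name__, rec.test_score())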
Example #6
    # Set the unused subplots invisible
    ax[0, 3].set_visible(False)
    ax[1, 3].axis('off')
    # Create legend
    for i, learner in enumerate(results.keys()):
        pl.bar(0, 0, color=colors[i], label=learner)
    pl.legend()
    # Aesthetics
    pl.suptitle("Performance Metrics for Three Supervised Learning Models",
                fontsize=16,
                y=1.10)
    pl.tight_layout()
    pl.show()


from emotion_recognition import EmotionRecognizer
from sklearn.svm import SVC
# init a model, let's use SVC
my_model = SVC()
# pass my model to EmotionRecognizer instance
# and balance the dataset
rec = EmotionRecognizer(model=my_model,
                        emotions=['sad', 'neutral', 'happy'],
                        balance=True,
                        verbose=0)
# train the model
rec.train()
# check the test accuracy for that model
print("Test score:", rec.test_score())
# check the train accuracy for that model
print("Train score:", rec.train_score())
Example #7
def get_estimators_name(estimators):
    result = [
        '"{}"'.format(estimator.__class__.__name__)
        for estimator, _, _ in estimators
    ]
    return ','.join(result), {
        estimator_name.strip('"'): estimator
        for estimator_name, (estimator, _, _) in zip(result, estimators)
    }


estimators = get_best_estimators(True)
estimators_str, estimator_dict = get_estimators_name(estimators)
features = ["mfcc", "chroma", "mel"]
detector = EmotionRecognizer(estimator_dict['BaggingClassifier'],
                             emotions='angry,happy,neutral'.split(","),
                             features=features,
                             verbose=0)
with open('model_angry.pkl', 'rb') as modelObj:
    model_emotion = pickle.load(modelObj)
#####################################################################################################


def sentiment_scores(sentence):

    # Create a SentimentIntensityAnalyzer object.
    sid_obj = SentimentIntensityAnalyzer()

    # The polarity_scores method of SentimentIntensityAnalyzer
    # returns a sentiment dictionary
    # containing the pos, neg, neu, and compound scores.
    sentiment_dict = sid_obj.polarity_scores(sentence)
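
    # A hedged continuation sketch: VADER's compound score is commonly
    # thresholded at +/- 0.05 to label the overall sentiment.
    if sentiment_dict['compound'] >= 0.05:
        return "positive"
    elif sentiment_dict['compound'] <= -0.05:
        return "negative"
    return "neutral"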
Example #8
        "--emotions",
        help=
        """Emotions to recognize separated by a comma ',', available emotions are
                                            "neutral", "calm", "happy" "sad", "angry", "fear", "disgust", "ps" (pleasant surprise)
                                            and "boredom", default is "sad,neutral,happy"
                                            """,
        default="sad,neutral,happy")
    parser.add_argument("-m",
                        "--model",
                        help="""
                                        The model to use, 8 models available are: {},
                                        default is "BaggingClassifier"
                                        """.format(estimators_str),
                        default="BaggingClassifier")

    # Parse the arguments passed
    args = parser.parse_args()

    features = ["mfcc", "chroma", "mel"]
    detector = EmotionRecognizer(estimator_dict[args.model],
                                 emotions=args.emotions.split(","),
                                 features=features,
                                 verbose=0)
    detector.train()
    print("Test accuracy score: {:.3f}%".format(detector.test_score() * 100))
    print("Please talk")

    filename = "test.wav"
    record_to_file(filename)
    result = detector.predict(filename)
    print(result)
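    # Per-emotion performance can also be inspected; the corresponding call
    # appears (commented out) in the first example above:
    # detector.draw_confusion_matrix()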