Esempio n. 1
0
def run(emotion):
    """Train a Gaussian Naive Bayes model for one emotion and print its test accuracy.

    ``fetch_data`` supplies sparse feature matrices, so they are densified
    with ``toarray()`` before being handed to GaussianNB.
    """
    train_x, test_x, train_y, test_y = fetch_data(emotion)
    model = GaussianNB()
    model.fit(train_x.toarray(), train_y)
    accuracy = model.score(test_x.toarray(), test_y)
    print(emotion, ':: Naive Bayes Accuracy:', accuracy)
Esempio n. 2
0
def run(emotion):
    """Sweep the k-NN neighbor count and plot test accuracy for *emotion*.

    Adds one line (labelled with the emotion) to the current matplotlib
    figure; the caller is expected to show/save the plot.
    """
    train_x, test_x, train_y, test_y = fetch_data(emotion)
    neighbor_counts = [3, 5, 7, 8, 10, 12, 15]
    # sklearn's fit() returns the estimator, so train-and-score chains cleanly.
    scores = [
        KNeighborsClassifier(n_neighbors=k)
        .fit(train_x, train_y)
        .score(test_x, test_y)
        for k in neighbor_counts
    ]
    plt.plot(neighbor_counts, scores, label=emotion)
Esempio n. 3
0
def run(emotion):
    """Sweep the MLP L2 penalty (alpha) and plot test accuracy for *emotion*.

    Adds one line (labelled with the emotion) to the current matplotlib
    figure; the caller is expected to show/save the plot.
    """
    train_x, test_x, train_y, test_y = fetch_data(emotion)
    alpha_values = [0.0001, 0.001, 0.01, 0.1]
    scores = []
    for alpha in alpha_values:
        model = MLPClassifier(alpha=alpha)
        model.fit(train_x, train_y)
        scores.append(model.score(test_x, test_y))
    plt.plot(alpha_values, scores, label=emotion)
Esempio n. 4
0
def run(emotion):
    """Sweep the random-forest size and plot test accuracy for *emotion*.

    Trains one RandomForestClassifier per entry in ``estimators``, collects
    the test-set accuracies, prints them once, and adds a labelled line to
    the current matplotlib figure.
    """
    training_vectors, test_vectors, training_labels, test_labels = fetch_data(
        emotion)
    estimators = [10, 50, 100, 1000]
    accuracies = []
    for estimator in estimators:
        classifier = RandomForestClassifier(n_estimators=estimator)
        classifier.fit(training_vectors, training_labels)
        accuracies.append(classifier.score(test_vectors, test_labels))
    # Print the full accuracy list once, after the sweep. The original
    # printed the growing partial list on every iteration — debug residue
    # that none of the sibling examples share.
    print(accuracies)
    plt.plot(estimators, accuracies, label=emotion)
Esempio n. 5
0
def run(emotion):
    """Sweep the AdaBoost learning rate and plot test accuracy for *emotion*.

    Adds one line (labelled with the emotion) to the current matplotlib
    figure; the caller is expected to show/save the plot.
    """
    train_x, test_x, train_y, test_y = fetch_data(emotion)
    rates = [
        0.1, 0.25, 0.5, 0.7, 0.9, 1, 1.1, 1.25, 1.5, 1.75, 2, 2.1, 2.25, 2.5,
        3, 3.1, 3.5,
    ]
    scores = []
    for rate in rates:
        model = AdaBoostClassifier(learning_rate=rate)
        model.fit(train_x, train_y)
        scores.append(model.score(test_x, test_y))
    plt.plot(rates, scores, label=emotion)
Esempio n. 6
0
        # Predict labels for test_set with a previously trained fastText model
        # and (optionally) report accuracy. NOTE: this is the interior of a
        # method whose signature is outside this chunk.
        if self.model_name:
            # Lazy import keeps pyfasttext optional until prediction is needed.
            from pyfasttext import FastText
            predictor = FastText()
            # Model file naming convention: ft_extras/<model_name>.bin
            predictor.load_model('ft_extras/'+self.model_name+'.bin')
            # predicted_labels: one entry per test sample; presumably a list of
            # (label, probability) pairs per sample — TODO confirm against the
            # pyfasttext predict_proba API.
            predicted_labels = predictor.predict_proba(test_set)
            # NOTE(review): truthiness check means an empty test_labels_vector
            # silently skips the accuracy report even when report_accuracy is
            # set — confirm that is intended.
            if report_accuracy and test_labels_vector:
                test_set_size = len(test_set)
                correct_predictions = 0
                invalid_labels = 0    # samples for which the model returned no label
                for index, labels in enumerate(predicted_labels):
                    if len(labels) != 0:
                        # Pick the label with the highest probability.
                        best_label = max(labels,key=lambda label:label[1])
                        if best_label[0] == test_labels_vector[index]:
                            correct_predictions += 1
                    else:
                        invalid_labels += 1
                        continue
                # Accuracy over samples that produced at least one label.
                # NOTE(review): raises ZeroDivisionError if every sample is
                # invalid (test_set_size == invalid_labels).
                print('Prediction accuracy:{}\n'.format(correct_predictions / (test_set_size - invalid_labels)))
        else:
            # No model has been trained/loaded for this instance; bail out.
            print('Please use the train method to train a model first.')
            return

if __name__ == '__main__':
    # Script entry point: train and evaluate one classifier per emotion,
    # using raw (un-vectorized) tweets from the project's data loader.
    from classifier.preclassifier import fetch_data

    emotions = ['joy', 'fear', 'anger', 'sadness']
    classifier = Classifier()
    for emotion in emotions:
        print('Predicting for {}:'.format(emotion))
        train_tweets, test_tweets, train_labels, test_labels = fetch_data(
            emotion, vectorize=False)
        classifier.train(train_tweets, train_labels)
        classifier.predict(test_tweets, test_labels)