Example #1
def confusion_matrix_scores(gold_labs, pred_labs, scores=True):
    '''Draw a confusion matrix and calculate accuracy, precision and recall scores'''
    # Draw a confusion matrix
    if not gold_labs or not pred_labs:
        raise RuntimeError("One of the prediction lists is empty")
    if len(gold_labs) != len(pred_labs):
        raise RuntimeError(
            "The number of predictions != the number of gold labels")
    cm = ConfusionMatrix(gold_labs, pred_labs)
    print(cm.pretty_format(show_percents=False))

    # calculate accuracy, precision and recall for a SICK part (not for individual problems)
    # guard against labels missing from the matrix and empty denominators
    try:
        pre = (cm[('E', 'E')] + cm[('C', 'C')]) / float(
            sum([cm[(i, j)] for i in 'NEC' for j in 'EC']))
    except (KeyError, ZeroDivisionError):
        pre = 0
    try:
        rec = (cm[('E', 'E')] + cm[('C', 'C')]) / float(
            sum([cm[(i, j)] for i in 'EC' for j in 'NEC']))
    except (KeyError, ZeroDivisionError):
        rec = 0
    try:
        acc = (cm[('E', 'E')] + cm[('C', 'C')] + cm[('N', 'N')]) / float(
            cm._total)
    except (KeyError, ZeroDivisionError):
        acc = 0
    # print accuracy, precision and recall for a SICK part (not for individual problems)
    if scores:
        print("Accuracy: {:.2f}%\nPrecision: {:.2f}%\nRecall: {:.2f}%".format(
            acc * 100, pre * 100, rec * 100))
    return (acc, pre, rec), cm
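A minimal way to call the function above (the gold/predicted lists are made up; 'E', 'C', 'N' follow the SICK entailment/contradiction/neutral convention used in the snippet):

from nltk import ConfusionMatrix

gold = ['E', 'N', 'C', 'E', 'N', 'C']
pred = ['E', 'N', 'N', 'E', 'C', 'C']
(acc, pre, rec), cm = confusion_matrix_scores(gold, pred)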
Example #2
 def show_results(self, gold, test):
     from nltk import ConfusionMatrix
     correct = 0
     for index, result in enumerate(gold):
         if result == test[index]:
             correct += 1
     print 'Accuracy: {:.2%}'.format(float(correct) / float(len(gold)))
     cm = ConfusionMatrix(gold, test)
     print cm.pp()
Example #3
def my_classify_results(true_values, classified_values):
    cm = ConfusionMatrix(true_values, classified_values)
    # cm[reference, test]: gold "pos" tagged "pos" is a true positive, etc.
    tp, tn = cm["pos", "pos"], cm["neg", "neg"]
    fp, fn = cm["neg", "pos"], cm["pos", "neg"]
    print tp, tn, fp, fn
    allres = tp + fp + tn + fn
    precision = float(tp) / (tp + fp)
    recall = float(tp) / (tp + fn)
    return "Accuracy : %s\nPrecision : %s\nRecall : %s\nF-value : %s" % (
        float(tp + tn) / allres, precision, recall,
        2 * precision * recall / (precision + recall))
Example #4
def confusion_matrix(gold, guess):
    correct = 0
    total = len(gold)
    for i in range(len(gold)):
        if guess[i] == gold[i]:
            correct += 1
    accuracy = float(correct) / float(total)
    print('Accuracy: {:.2%}'.format(accuracy))

    # Confusion Matrix
    cm = ConfusionMatrix(gold, guess)
    print(cm.pp())
Example #5
def confusion_matrix(gold, guess):
    correct = 0
    total = len(gold)
    for i in range(len(gold)):
        if guess[i] == gold[i]:
            correct += 1
    accuracy = float(correct) / float(total)
    print('Accuracy: {:.2%}'.format(accuracy))

    # Confusion Matrix
    cm = ConfusionMatrix(gold, guess)
    print(cm.pp())
Example #6
def performance(classifier):
    test = []
    gold = []

    for i in range(len(test_features)):
        test.append(classifier.classify(test_features[i][0]))
        gold.append(test_features[i][1])

    matrix = ConfusionMatrix(gold, test)
    labels = {"female", "male"}

    # ConfusionMatrix is indexed as matrix[reference, test]
    tp = matrix["f", "f"]  # actual female, predicted female
    fn = matrix["f", "m"]  # actual female, predicted male (missed female)
    fp = matrix["m", "f"]  # actual male, predicted female
    tn = matrix["m", "m"]  # actual male, predicted male

    precision_female = tp / (tp + fp)
    precision_male = tn / (tn + fn)
    recall_female = tp / (tp + fn)  # actual female / (actual female + missed female)
    recall_male = tn / (tn + fp)
    fscore_female = (2 * precision_female * recall_female /
                     (precision_female + recall_female))
    fscore_male = (2 * precision_male * recall_male /
                   (precision_male + recall_male))

    print(f"Precision for female names: {round(precision_female,2)}")
    print(f"Precision for male names: {round(precision_male,2)}")
    print(f"Recall for female names: {round(recall_female,2)}")
    print(f"Recall for male names: {round(recall_male,2)}")
    print(f"F-score for female names: {round(fscore_female,2)}")
    print(f"F-score for male names: {round(fscore_male,2)}")
    print("\n")
Example #7
    def __init__(self,
                 X_test,
                 conv_layers,
                 flow,
                 mode,
                 path_saver,
                 Y_test=None):
        """
		The CNN_infer class is initialized with its arguments when its 
		instance is created.
		"""

        _, n_H0, n_W0, n_C0 = X_test.shape

        if mode == 'compare':
            _, n_y = Y_test.shape
            self.X, self.Y = self._create_placeholder(n_H0, n_W0, n_C0, n_y)

        elif mode == "predict":
            self.X = tf.placeholder(dtype=tf.float32,
                                    shape=[None, n_H0, n_W0, n_C0])

        self.parameters = self._initialize_parameters(conv_layers)
        self.Z_n = self._forward_propagation(flow)

        saver = tf.train.Saver()

        with tf.Session() as sess:

            saver.restore(sess, path_saver)
            self.logits = sess.run(self.Z_n, feed_dict={self.X: X_test})

            if mode == 'compare':

                correct_prediction = tf.equal(tf.argmax(self.Z_n, axis=1),
                                              tf.argmax(self.Y, axis=1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                print("Test Accuracy : ",
                      accuracy.eval({
                          self.X: X_test,
                          self.Y: Y_test
                      }))

                logits_argmax = np.argmax(self.logits, axis=1).tolist()
                y_argmax = np.argmax(Y_test, axis=1).tolist()
                print(ConfusionMatrix(y_argmax, logits_argmax))

            elif mode == 'predict':

                prediction = np.argmax(self.logits)
                print("Your algorithm predicts Y = " + str(prediction))
Example #8
def generate_report(gold, predict, labels, detailed=True):
    """
    Generate the classification report
    :param gold: the gold label
    :param predict: the predict label
    :param labels: label sets
    :return: none
    """
    if detailed:
        print('macro averge: %f' % precision_recall_fscore_support(gold, predict, average='macro'))
        print('micro average: %f' % precision_recall_fscore_support(gold, predict, average='micro'))
        print('weighted average: %f' % precision_recall_fscore_support(gold, predict, average='weighted'))
    else:
        import warnings
        warnings.filterwarnings("ignore")
    print('accuracy: %f' % accuracy_score(gold, predict))
    print(classification_report(gold, predict, target_names=labels))
    print(ConfusionMatrix(gold, predict))
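A hypothetical call of generate_report (the label lists below are invented; labels should name the classes in the sorted order that classification_report expects):

gold = ['pos', 'neg', 'pos', 'neg', 'pos']
predict = ['pos', 'pos', 'pos', 'neg', 'neg']
generate_report(gold, predict, labels=['neg', 'pos'], detailed=False)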
Example #9
 def __init__(self):
     self.dataSets = os.listdir('data/')
     # for convenience we assume two annotators
     self.choice = -1
     self.annotation1 = {}
     self.annotation2 = {}
     self._loadAnnotations()
     # which category does the event fall into (0,1,2,3,4,5)?
     self.categoryEval = [[], []]
     # is it an event (0) or not (1)?
     self.eventEval = [[], []]
     self._makeAnnotationLists()
     self.eventKappa = self._calculateKappa(self.eventEval)
     self.categoryKappa = self._calculateKappa(self.categoryEval)
     print("\neventKappa:", self.eventKappa)
     print("categoryKappa:", self.categoryKappa, '\n')
     print(ConfusionMatrix(self.judge1, self.judge2))
     print("Accuracy", accuracy(self.judge1, self.judge2))
Example #10
def main(estimators=[LightGbmWithLogReg, Stacking],
         with_full_data_tfidf=True,
         cutoff=None):
    train_x, train_y, train_positions, train_file_names = get_data(
        main_dir=TRAINING_DIR)

    validation_x, validation_y, validation_positions, validation_file_names = get_data(
        main_dir=VALIDATION_DIR)

    if cutoff:
        train_x = train_x[:cutoff]
        validation_x = validation_x[:cutoff]
        train_y = train_y[:cutoff]
        validation_y = validation_y[:cutoff]
        train_positions = train_positions[:cutoff]

    clfs = [estimator() for estimator in estimators]
    predictions_df = pd.DataFrame()
    predictions_df['text'] = validation_x
    predictions_df['actual'] = validation_y

    for clf in clfs:
        t_start = time.time()
        if with_full_data_tfidf:
            clf.fit_with_test(train_x, train_y, validation_x)
        else:
            clf.fit(train_x, train_y)

        predictions = clf.predict(validation_x)
        t_end = time.time()
        m, s = divmod(t_end - t_start, 60)

        print(ConfusionMatrix(validation_y, predictions))

        acc = accuracy_score(validation_y, predictions)
        model_name = clf.params['model_description']

        print(model_name)
        print("Done in %fm and %fs" % (round(m), round(s)))
        print("Accuracy:" + str(acc))

        predictions_df[model_name] = predictions

    predictions_df.to_csv("data/output/model_predictions.csv", index=False)
Example #11
def example(hmm, test_set, n1, n2):
    """Try to tag sentences between n1 and n2 (excluded) of the test set; just to show the result..."""
    estimated_tags = []
    gold_tags = []
    for test_sentence in test_set[n1:n2]:

        # the zip() function with the "*" operator can be used to unzip the list
        # see: https://stackoverflow.com/questions/7558908/unpacking-a-list-tuple-of-pairs-into-two-lists-tuples
        # [("this","is")]         [("PP","VB")]    <-- zip(*(["this","PP"],["is","VB"]))
        unlabelled_test_sentence, test_sentence_tags = zip(*test_sentence)

        # decoding...
        test_sentence_estimated_tags = hmm.best_path(unlabelled_test_sentence)

        # [("this","PP"),("is","VB")] --> "this/PP is/VB"
        print("Test: %s" %
              ' '.join([word + "/" + tag for (word, tag) in test_sentence]))

        # e.g.: zip(["this","is"],["PP","VB"]) ---> [("this","PP"),("is","VB")]
        print("HMM : %s" % ' '.join([
            word + "/" + tag for (word, tag) in zip(
                unlabelled_test_sentence, test_sentence_estimated_tags)
        ]))

        # e.g.: zip(['PP', 'NN', 'VB'],['PP', 'NN', 'NN']) --> [('PP','PP'),('NN','NN'),('VB','NN')]
        comparison_list = [
            1 if tag1 == tag2 else 0
            for (tag1,
                 tag2) in zip(test_sentence_tags, test_sentence_estimated_tags)
        ]  # e.g.: --> [1, 1, 0]

        print("Comparison:", comparison_list)
        print("Accuracy   : %.2f\n" %
              (sum(comparison_list) / len(test_sentence) *
               100))  # --> sum([1, 1, 0]) / 3 = 2/3

        estimated_tags += test_sentence_estimated_tags  # collects estimated tags, for further use
        gold_tags += test_sentence_tags  # collects correct tags, for further use

    # prints confusion matrix
    print(ConfusionMatrix(gold_tags, estimated_tags))
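The zip(*...) unpacking used above can be checked on its own; a small standalone sketch:

tagged = [("this", "PP"), ("is", "VB")]
words, tags = zip(*tagged)
print(words)  # ('this', 'is')
print(tags)   # ('PP', 'VB')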
Example #12
def train(train_data,
          valid_data,
          estimator=StackingSimple,
          cv_split=10,
          with_cross_validation=False,
          with_validation=False,
          with_full_data_tfidf=False,
          train_with_validation=True,
          path_to_save_model=""):

    train_x, train_y, train_positions = split_data(train_data)

    if train_with_validation:
        validation_x, validation_y, validation_positions = split_data(
            valid_data)
        train_x.extend(validation_x)
        train_y.extend(validation_y)

    clf, cv, val, gs = None, None, None, None

    if estimator:
        clf = estimator()

    if with_cross_validation:
        if with_full_data_tfidf:
            skf = StratifiedKFold(n_splits=cv_split,
                                  random_state=42,
                                  shuffle=True)
            all_acc = []
            X = np.array(train_x)
            y = np.array(train_y)
            for train_index, test_index in skf.split(X, y):
                y_train, y_test = y[train_index], y[test_index]
                X_train, X_test = X[train_index], X[test_index]
                print(X_train.shape)

                clf.fit_with_test(X_train.tolist(), y_train, train_positions,
                                  X_test.tolist())
                predictions = clf.predict(X_test.tolist())
                all_acc.append(accuracy_score(y_test, predictions))

            print("Accuracies:", all_acc)
            print("Mean:", np.mean(all_acc))
            print("Stdev:", np.std(all_acc))

        else:
            cv = cross_validate(
                estimator=clf,
                X=train_x,
                y=train_y,
                fit_params={'train_positions': train_positions},
                cv=cv_split,
                scoring="accuracy",
                n_jobs=get_n_jobs(),
                return_train_score=True)

    if with_validation:
        t_start = time.time()
        if with_full_data_tfidf:
            clf.fit_with_test(train_x, train_y, train_positions, validation_x)
        else:
            clf.fit(train_x, train_y, train_positions)

        predictions = clf.predict(validation_x)
        t_end = time.time()

        print(ConfusionMatrix(validation_y, predictions))

        val = {
            'accuracy': accuracy_score(validation_y, predictions),
            'time': t_end - t_start
        }
        print(val['accuracy'])
    else:
        clf.fit(train_x, train_y)
    clf.save_model(path_to_save_model)
Example #13
logreg_accy = round(accuracy_score(Target_pred, Target_test), 3)
print(logreg_accy)  ## 79.8% Accuracy

##cross validation

from sklearn import metrics
# sklearn.cross_validation was replaced by sklearn.model_selection (0.18+)
from sklearn.model_selection import cross_val_predict, cross_val_score
predicted_cv = cross_val_predict(logreg, X, Target, cv=10)
metrics.accuracy_score(Target, predicted_cv)  ## 86.05 %

accuracy_cv = cross_val_score(logreg, X, Target, cv=10, scoring='accuracy')
print(accuracy_cv)
print(cross_val_score(logreg, X, Target, cv=10, scoring='accuracy').mean())

from nltk import ConfusionMatrix
print(ConfusionMatrix(list(Target), list(predicted_cv)))

## Decision tree ##
##Cross Validation

from sklearn.tree import DecisionTreeClassifier

dectree = DecisionTreeClassifier(max_depth=3,
                                 class_weight='balanced',
                                 min_weight_fraction_leaf=0.01)

depth = []
for i in range(3, 20):
    clf = DecisionTreeClassifier(max_depth=i)
    # Perform 7-fold cross validation
    scores = cross_val_score(estimator=clf, X=X, y=Target, cv=7, n_jobs=4)
Example #14
test_tweets = BuildFeatureVector(all_tweet_array[training_size:])

print len(test_tweets)

training_set = nltk.classify.apply_features(extract_features, train_tweets)
test_set = nltk.classify.apply_features(extract_features, test_tweets)

NBClassifier = nltk.NaiveBayesClassifier.train(training_set)

NBClassifier.show_most_informative_features(20)

TestSet(all_tweet_array[training_size:])

print ''
print 'TRAINING accuracy:', nltk.classify.accuracy(NBClassifier, training_set)
print 'TEST accuracy:', nltk.classify.accuracy(NBClassifier, test_set)
print ''
print 'NEU precision:', precision(refSet['NEU'], testSet['NEU'])
print 'NEU recall:', recall(refSet['NEU'], testSet['NEU'])
print 'NEU F-measure:', f_measure(refSet['NEU'], testSet['NEU'])
print ''
print 'POS precision:', precision(refSet['POZ'], testSet['POZ'])
print 'POS recall:', recall(refSet['POZ'], testSet['POZ'])
print 'POS F-measure:', f_measure(refSet['POZ'], testSet['POZ'])
print ''
print 'NEG precision:', precision(refSet['NEG'], testSet['NEG'])
print 'NEG recall:', recall(refSet['NEG'], testSet['NEG'])
print 'NEG F-measure:', f_measure(refSet['NEG'], testSet['NEG'])
print ''
print ConfusionMatrix(refSetF, testSetF)
Example #15
## Part 2

X = data[cols[1:-1]]
y = data.binary

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)

## Part 3

from sklearn.linear_model import LogisticRegression     # import the estimator we want
logreg = LogisticRegression()                           # instantiate estimator
logreg.fit(X_train, y_train)                            # fit with training data
preds = logreg.predict(X_test)                          # predict for test data
probs = logreg.predict_proba(X_test)                    # predict probabilities for AUC

print(ConfusionMatrix(list(y_test), list(preds)))       # see how we did

print(metrics.accuracy_score(y_test, preds))            # check accuracy (88.89%)

print(metrics.roc_auc_score(y_test, probs[:,1]))        # check AUC (93.98%)

## Part 4

# train first model (logistic regression):
mod1 = LogisticRegression()
scores1 = cross_val_score(mod1, X, y, cv=5, scoring='roc_auc')

# train second model (KNN, K=1):
from sklearn.neighbors import KNeighborsClassifier
mod2 = KNeighborsClassifier(n_neighbors=1)
scores2 = cross_val_score(mod2, X, y, cv=5, scoring='roc_auc')
Example #16
    #print(features)
    return features


random.shuffle(tweets)

v_train = tweets[:2000]
v_test = tweets[:2000]

print("Training...")
training_set = apply_features(extract_features, v_train)
classifier = NaiveBayesClassifier.train(training_set)

tweet = "this movie is sweet and astonishing"
print(classifier.classify(extract_features(tweet)))

#print(classifier.show_most_informative_features(32))

print("Test...")
test_set = apply_features(extract_features, v_test)
print('\nAccuracy %f\n' % accuracy(classifier, test_set))

# build confusion matrix over test set
test_truth = [s for (t, s) in v_test]
test_predict = [classifier.classify(t) for (t, s) in test_set]

print('Confusion Matrix')
print(ConfusionMatrix(test_truth, test_predict))

print("FIN")
Example #17
def exercise4(dataset, runs=5, test_portion=0.50):

    LOGGER.info('Building datasets...')
    # Build Test and Training Review Sets
    test_reviews = None
    train_reviews = None
    predetermined = None
    overall_classifications = []
    accuracies = []
    rmses = []
    for n in range(runs):
        if dataset.test and dataset.train:
            test_reviews = dataset.test
            train_reviews = dataset.train
            predetermined = True
        else:
            test_reviews, train_reviews = dataset.make_author_test_train(
                test_portion)
            predetermined = False
        if not predetermined:
            LOGGER.info('Run %d of %d', n + 1, runs)

        LOGGER.info('Building features...')
        # Build Features
        test_features = [(extract_features4(r), r.author)
                         for r in test_reviews]
        train_features = [(extract_features4(r), r.author)
                          for r in train_reviews]

        LOGGER.info('Building classifier...')
        # Build Classifier
        LOGGER.info('Training Examples: %d', len(train_reviews))
        LOGGER.info('Training Features: %d', len(train_features))
        classifier = nltk.NaiveBayesClassifier.train(train_features)
        #classifier = nltk.DecisionTreeClassifier.train(train_features)

        LOGGER.info('Checking accuracy...')
        # Perform Classification
        classifications = []
        for t in test_features:
            classifications.append((t[1], classifier.classify(t[0])))

        LOGGER.info('Printing results...')
        classifications.sort()
        accuracy = nltk.classify.accuracy(classifier, test_features)
        rmse = math.sqrt(
            sum(1
                for a, c in classifications if a != c) / len(classifications))
        confusion = ConfusionMatrix([ref for ref, test in classifications],
                                    [test for ref, test in classifications])
        overall_classifications.extend(classifications)
        if not predetermined:
            HEADER = ('ACTUAL', 'CLASSIFIED')
            col_width = max(len(a) for a, c in (classifications + [HEADER]))
            for a, c in ([HEADER] + classifications):
                print("Exercise 4: %s %s" % (a.ljust(col_width), c))
        print("Exercise 4: %.3f" % (accuracy, ))
        print("Exercise 4: Average RMSE Error: %.3f" % (rmse, ))

        if predetermined:
            return accuracy
        print('Exercise 4: Confusion Matrix:\n%s' % (confusion.pretty_format(
            show_percents=False, values_in_chart=True), ))
        accuracies.append(accuracy)
        rmses.append(rmse)
    overall_confusion = ConfusionMatrix(
        [ref for ref, test in overall_classifications],
        [test for ref, test in overall_classifications])
    print('Exercise 4: Overall Confusion Matrix:\n%s' %
          (overall_confusion.pretty_format(show_percents=False,
                                           values_in_chart=True), ))
    print("Exercise 4: Runs: %d Average     : %.3f Max: %.3f Min: %.3f" %
          (runs, sum(accuracies) / len(accuracies), max(accuracies),
           min(accuracies)))
    print("Exercise 4: Runs: %d Average RMSE: %.3f Max: %.3f Min: %.3f" %
          (runs, sum(rmses) / len(rmses), max(rmses), min(rmses)))
    return accuracies
Example #18
files = ["Clov","Hamm","Monologue","Nagg","Nell"]
ref = ""
tagged = ""
index = 0

path = r'C:\Users\Cassie\Google Drive\MSiA\Fall 2014\MSiA 490\hw\characters'
for file in listdir(path):
    filepath = path + '\\' + file
    with open(filepath, "r") as myfile:
        index +=1
        for line in myfile:
            ref = ref + ',' + files[index-1]
            tag = classi.classify(word_feats(line))
            tagged = tagged + ',' + tag

print ConfusionMatrix(ref.split(',')[1:], tagged.split(',')[1:])
             
'''
soup = BeautifulSoup(open("wiki.html"))
tables = soup.findAll("table", { "class" : "wikitable sortable" })
ref = ""
tagged = ""
continents = ["Africa","Asia","Europe","North America","South America","Oceania","Antrctica"]
index = 0
for table in tables:
    index +=1
    for row in table.findAll('tr'):
        cells = row.findAll("td")
        if len(cells) == 5:       
            if cells[2].find('a',text=True) is not None:
                b = cells[2].find('a',text=True)
Example #19
def score(change):
    if change < -.02:
        return "down"
    elif change < .02:
        return "flat"
    else:
        return "up"

#Generate Features & Results 
training_data = [(FreqDist(d["tokens"]), score(d["2-2dayPriceChange"])) for d in training]
test_features = [FreqDist(d["tokens"]) for d in testing]
test_results = [score(d["2-2dayPriceChange"]) for d in testing]

#Train Model
model = nltk.NaiveBayesClassifier.train(training_data)
 
#Generate Predictions
preds = model.classify_many(test_features)

#Print Results
amounts = [ (direction, len([ t for t in test_results if t ==direction])) for direction in ["down", "flat", "up"]]
print(amounts)
print("Majority Baseline: %.2f" % (max([b for a,b in amounts]) / len(test_results)))

print("Accuracy: %.2f" % (nltk.accuracy(preds, test_results)))

print(ConfusionMatrix(test_results, preds))  # reference (gold) labels first, then predictions
 
print(model.show_most_informative_features(10))

Example #20
    def test(self, lstm_pos_tagger, test_file_path):
        plot_confusion_matrix = False
        test = Conllu_Manager('test',
                              embedding='mine',
                              dataSetPath=test_file_path)
        posTaggedSentenceFilePath = os.path.join(os.getcwd(), 'output',
                                                 'pos_tagged_sentences.txt')
        resultFilePath = os.path.join(os.getcwd(), 'output', 'result.txt')
        fp_post_tagged = open(posTaggedSentenceFilePath, 'w')
        fp_result = open(resultFilePath, 'w')
        x, y = test.getSentences()
        y_true = []
        for i in range(len(y)):
            y_true.extend(y[i].ravel())

        y_pred = []
        t = tqdm(range(len(x)))
        for i in t:
            y_sentence_pred = lstm_pos_tagger.predict(x[i])
            y_pred.extend(y_sentence_pred)
            sentence_to_write = ''
            tag_predicted_to_write = ''
            for w in x[i]:
                sentence_to_write += '{} '.format(w)
            for tag in y_sentence_pred:
                tag_predicted_to_write += '{} '.format(tag)
            fp_post_tagged.write('{}\n'.format(sentence_to_write))
            fp_post_tagged.write('{}\n'.format(tag_predicted_to_write))
            P_, R_, F1_, S_ = precision_recall_fscore_support(y[i],
                                                              y_sentence_pred,
                                                              average='micro')
            fp_result.write(
                'Precision: {} , Recall {} , F1 {} , Coverage {} \n'.format(
                    P_, R_, F1_, 1))

        fp_post_tagged.close()
        fp_result.close()

        print(ConfusionMatrix(y_true, y_pred))
        P, R, F1, S = precision_recall_fscore_support(y_true,
                                                      y_pred,
                                                      average='micro')

        if plot_confusion_matrix:
            labels_name = []
            for key in test.t2i:
                labels_name.append(key)
            cnf_matrix = confusion_matrix(y_true, y_pred)
            cnf_matrix = cnf_matrix.astype('float64') / cnf_matrix.sum(
                axis=1)[:, np.newaxis]
            for x in range(0, cnf_matrix.shape[0]):
                for y in range(0, cnf_matrix.shape[1]):
                    if cnf_matrix[x][y] != int(cnf_matrix[x][y]):
                        cnf_matrix[x][y] = round(cnf_matrix[x][y], 3)
            # Plot
            plt.figure(figsize=(15, 15))
            df_cm = pd.DataFrame(cnf_matrix,
                                 index=labels_name,
                                 columns=labels_name)
            sn.set(font_scale=0.85)
            sn.heatmap(df_cm,
                       annot=True,
                       annot_kws={"size": 10},
                       cmap=plt.cm.Blues,
                       square=True,
                       linewidths=.1)
            figurePath = os.path.join(os.getcwd(), 'latec',
                                      'confusionMatrix.png')
            plt.savefig(figurePath, dpi=300)
            plt.close()
        return dict(precision=P, recall=R, coverage=S, f1=F1)
Example #21
train['60fretbinary'] = y_train_simple
test = pd.DataFrame(data=X_test_simple, columns=['ZYX5minSentiment'])
test['60fretbinary'] = y_test_simple

#Run a logistic regression
testfret_simple = LogisticRegression()
testfret_simple.fit(train[['ZYX5minSentiment']], y_train_simple)
B1 = testfret_simple.coef_[0][0]
B0 = testfret_simple.intercept_[0]
np.exp(B1)

testfret_simple.score(X_test_simple, y_test_simple)

preds_simple = testfret_simple.predict(X_test_simple)
print metrics.accuracy_score(y_test_simple, preds_simple)
print ConfusionMatrix(list(y_test_simple), list(preds_simple))

#Now look at more variables
X = data[[
    'ZYX10minSentiment', 'ZYX20minSentiment', 'ZYX30minSentiment',
    'ZYX60minSentiment', 'ZYX10minTweets', 'ZYX20minTweets', 'ZYX30minTweets',
    'ZYX60minTweets', 'ZYX10minPriceChange', 'ZYX20minPriceChange',
    'ZYX30minPriceChange', 'ZYX60minPriceChange'
]]
y = data['60fretbinary']
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=1)
X = d[[
    'ZYX10minSentiment', 'ZYX20minSentiment', 'ZYX30minSentiment',
Example #22
def test(hmms, test_set, index_features):
    total = 0
    correct = 0
    correct_cat = []
    predicted_cat = []
    for cat in test_set.keys():
        for sentence in test_set[cat]:
            '''test_sentence = [(tuple(word),'') for word in sentence]'''
            '''feature subset selection'''
            new_sentence = []
            for word in sentence:
                new_word = []
                for feature in index_features:
                    new_word.append(word[feature])
                new_sentence.append(new_word)
            test_sentence = [(tuple(word), '') for word in new_sentence]

            if verbose:
                pass

            max_prob = -1
            sentence_cat = random.choice(
                test_set.keys())  #assign a random category just to initialize

            # test the probabilities that the sentence is of a type of emotion
            # the higher probability is the winner
            #if verbose:
            #    print "Probabilities of each hmm : "
            for c in hmms.keys():
                sentence_prob = hmms[c].probability(test_sentence)
                #if verbose:
                #    print c," : ",sentence_prob
                if sentence_prob > max_prob:
                    sentence_cat = c
                    max_prob = sentence_prob
            #if verbose:
            #    print ""
            #print ""

            # save the correct and predicted categories; these lists are used
            # later to build the ConfusionMatrix
            correct_cat.append(cat)
            predicted_cat.append(sentence_cat)
            if (cat == sentence_cat):
                correct += 1
            total += 1
    try:
        accuracy = (float(correct) / total) * 100
    except ZeroDivisionError:
        accuracy = 0  # error

    # the confusionMatrix function needs the list of the correct label and the list of the predicted
    matrix = ConfusionMatrix(correct_cat, predicted_cat)

    print "correct:", correct
    print "total:", total
    print "the accuracy is: %.2f%%" % accuracy
    print matrix
    return accuracy, matrix
Example #23
def main(estimator=MLP,
         cv_split=5,
         with_cross_validation=True,
         with_validation=False,
         with_test=False,
         with_external_data=False,
         validate_on_external=False,
         with_grid_search=False,
         with_full_data_tfidf=False,
         train_with_validation=False,
         predict_breaches=True,
         train_on_breach=True,
         cutoff=None):

    if validate_on_external:
        train_x, validation_x, train_y, validation_y = get_external_data(
            TRAINING_EXTERNAL_FILE, 3000, 1500)
        train_positions = []
    elif train_on_breach:
        train_x, train_y, train_positions, train_file_names = get_data(
            main_dir=BREACH_DIR, external_file=None, breach=True)
    else:
        train_x, train_y, train_positions, train_file_names = get_data(
            main_dir=TRAINING_DIR,
            external_file=TRAINING_EXTERNAL_FILE
            if with_external_data else None)

        # return print_splits(train_x[:3000], train_positions[:3000])

        if train_with_validation:
            validation_x, validation_y, validation_positions, validation_file_names = get_data(
                main_dir=VALIDATION_DIR)
            train_x.extend(validation_x)
            train_y.extend(validation_y)

        if cutoff:
            train_x = train_x[:cutoff]
            train_y = train_y[:cutoff]
            train_positions = train_positions[:cutoff]

    print("Training on {0} examples".format(len(train_x)))

    clf, cv, val, gs = None, None, None, None

    if estimator:
        clf = estimator()

    if with_cross_validation:
        if with_full_data_tfidf:
            skf = StratifiedKFold(n_splits=cv_split,
                                  random_state=42,
                                  shuffle=True)
            all_acc = []
            X = np.array(train_x)
            y = np.array(train_y)
            for train_index, test_index in skf.split(X, y):
                y_train, y_test = y[train_index], y[test_index]
                X_train, X_test = X[train_index], X[test_index]
                print(X_train.shape)

                clf.fit_with_test(X_train.tolist(), y_train, train_positions,
                                  X_test.tolist())
                predictions = clf.predict(X_test.tolist())
                all_acc.append(accuracy_score(y_test, predictions))

            print("Accuracies:", all_acc)
            print("Mean:", np.mean(all_acc))
            print("Stdev:", np.std(all_acc))

        elif train_on_breach:
            skf = StratifiedKFold(n_splits=cv_split,
                                  random_state=42,
                                  shuffle=True)
            f_scores = []
            diff = []
            r = []
            p = []
            all_acc = []
            X = np.array(train_x)
            y = np.array(train_y)
            pos = np.array(train_positions)
            for train_index, test_index in skf.split(X, y):
                y_train, y_test = y[train_index], y[test_index]
                X_train, X_test = X[train_index], X[test_index]
                pos_train, pos_test = pos[train_index], pos[test_index]
                print(X_train.shape)

                clf.fit_with_test(X_train.tolist(), y_train, train_positions,
                                  X_test.tolist())

                change_predictions = clf.predict(X_test.tolist())
                tn, fp, fn, tp = confusion_matrix(y_test,
                                                  change_predictions).ravel()
                print('tn: {}, fp: {}, fn: {}, tp: {}'.format(tn, fp, fn, tp))
                all_acc.append(accuracy_score(y_test, change_predictions))
                predictions = get_breach_predictions(clf, X_test.tolist(),
                                                     change_predictions)
                totalWinDiff, totalWinR, totalWinP, totalWinF, outStr = evaluate(
                    X_test, pos_test, predictions)
                print("%s" % outStr)
                diff.append(totalWinDiff)
                r.append(totalWinR)
                p.append(totalWinP)
                f_scores.append(totalWinF)

            print("Mean diff:", np.mean(diff))
            print("Mean r:", np.mean(r))
            print("Mean p:", np.mean(p))
            print("Mean f:", np.mean(f_scores))

            print("Accuracies:", all_acc)
            print("Mean:", np.mean(all_acc))
            print("Stdev:", np.std(all_acc))

            # for m in all_measures:
            #     print("%s" % m)
            #     print('----------------------------------')

        else:
            cv = cross_validate(
                estimator=clf,
                X=train_x,
                y=train_y,
                fit_params={'train_positions': train_positions},
                cv=cv_split,
                scoring="accuracy",
                n_jobs=get_n_jobs(),
                return_train_score=True)

    if with_grid_search:
        clf, best_score = __grid_search(clf, clf.get_grid_params(), train_x,
                                        train_y)
        gs = {'accuracy': best_score}

    if with_validation:
        if not validate_on_external:
            validation_x, validation_y, validation_positions, validation_file_names = get_data(
                main_dir=VALIDATION_DIR)

        if cutoff:
            validation_x = validation_x[:cutoff]
            validation_y = validation_y[:cutoff]

        t_start = time.time()
        if with_full_data_tfidf:
            clf.fit_with_test(train_x, train_y, train_positions, validation_x)
        else:
            clf.fit(train_x, train_y, train_positions)

        if predict_breaches:
            predictions = get_breach_predictions(clf, validation_x,
                                                 validation_y)
        else:
            predictions = clf.predict(validation_x)
        t_end = time.time()

        if predict_breaches:
            persist_output(OUTPUT_DIR,
                           predictions,
                           validation_file_names,
                           breach=predict_breaches)
            print("%s" %
                  evaluate(validation_x, validation_positions, predictions))
        else:
            print(ConfusionMatrix(validation_y, predictions))

            val = {
                'accuracy': accuracy_score(validation_y, predictions),
                'time': t_end - t_start
            }

    if with_test:
        test(clf, train_x, train_y, train_positions, with_full_data_tfidf)

    results = get_results(len(train_x),
                          clf_params=clf.params,
                          cv=cv,
                          val=val,
                          gs=gs)
    print(results)

    if config_local().get('persist_results', False):
        write_results_to_file(results)
Example #24
d_2014['probs2014'] = logReg.predict_proba(d_2014[col_c])[:, 1]

df =d_2014[['player','team','pred2014','probs2014']]
null = 1 - sum(d.team) / float(len(d.team))
#Accurately predicting 8 award winners with a probability of .5 or greater

#Create predictions using the model on the test set
test['pred_class'] = logReg.predict(test[col_c ])


# nicer confusion matrix
from nltk import ConfusionMatrix
print ConfusionMatrix(list(y_test), list(preds))


#229 possible yes; 57%

## ROC CURVE and AUC

#Do I want it on X_test or 2014 data?
probs = logReg.predict_proba(X_test)[:, 1]
from sklearn import metrics


# plot ROC curve
fpr, tpr, thresholds = metrics.roc_curve(y_test, probs)
plt.plot(fpr, tpr)
plt.xlim([0.0, 1.0])
Example #25
def evaluate(fragments,
             sumfunc,
             condition,
             normalization,
             verbose=True,
             perbook=False,
             topfragments=False,
             breakdown=True,
             conftable=False):
    green = "\033[32m"
    red = "\033[31m"
    gray = "\033[0m"  # ANSI codes
    names = set(map(getauthor, fragments.values()[0]))
    results = {}
    # heading
    if verbose and not perbook:
        print "\n &", 21 * " ",
        print "&".join(a.rjust(16) for a in sorted(names)),
        print "&\tguess &\t\t\tconfidence\\\\"
    prev = "foo.bar"
    # loop over texts to be classified
    for text in sorted(fragments):
        if perbook and getauthor(text) != getauthor(prev):
            print "\n &", 21 * " ",
            print " &".join(
                "\\rotatebox{45}{%s}" %
                a.split(" - ")[-1].split(".")[0].replace("&", "\\&")
                for a in sorted(fragments[text])), "\\\\"
        if verbose:
            print text.split(" - ")[-1].split(".")[0][:25].replace(
                "&", "\\&").ljust(25),
        inter = {}
        # loop over possible authors
        for author in sorted(fragments[text]):
            inter[author] = sum(
                map(sumfunc, filter(condition, fragments[text][author].items())
                    )) / normalization(text, author)
        if verbose:
            for author in sorted(inter):
                if inter[author] == max(inter.values()):
                    l, r = "\\textbf{", "}"
                else:
                    l, r = "".ljust(8), " "
                if isinstance(inter[author], float):
                    print("& %s%.2f%s" % (l, inter[author], r)).rjust(16),
                elif isinstance(inter[author], int):
                    print("& %s%d%s" % (l, inter[author], r)).rjust(16),
                else:
                    print "& %s%s" % (l, repr(inter[author]).rjust(8), r),
        actualauthor = getauthor(text)
        guess = max(inter, key=inter.get)
        results.setdefault(actualauthor, []).append(guess)
        if verbose and not perbook:
            print "&",
            print green + "correct:" if getauthor(
                guess) == actualauthor else red + "wrong:  ",
            print getauthor(guess).ljust(10), gray,
            try:
                confidence = (
                    100 * (max(inter.values()) - sorted(inter.values())[-2]) /
                    float(max(inter.values())))
            except ZeroDivisionError:
                confidence = 0.0
            except IndexError:
                confidence = 0.0
            print "& %s%5.2f%s " % (
                (red if confidence < 50 else green), confidence, gray)
        elif verbose:
            print "\\\\"
        prev = text
    if verbose: print

    if topfragments: print "top fragments"
    for name in sorted(names) if topfragments else ():
        for text in sorted(fragments):
            if not getauthor(text) == name: continue
            print text
            for label in ("(ROOT", "(S ", "(NP ", "(VP ", "(PP "):
                guess = max(fragments[text],
                            key=lambda x: sum(
                                sumfunc(a) for a in fragments[text][x].items()
                                if condition(a)) / norm(x))
                try:
                    frag = max((a[0]
                                for a in fragments[text][guess].iteritems()
                                if condition(a) and a[0].startswith(label)),
                               key=lambda x: (sumfunc((x, fragments[text][
                                   guess][x])), fragments[text][guess][x]))
                except ValueError:
                    pass
                else:
                    f1 = Tree(frag)
                    f2 = Tree(frag)
                    print "%2d" % fragments[text][guess][frag], " ".join(
                        a.replace(" ", "_")[:-1]
                        for a in re.findall(r" \)|[^ )]+\)", frag)),
                    try:
                        f2.un_chomsky_normal_form()
                    except:
                        print f1.pprint(margin=9999, parens=("[", " ]"))
                    else:
                        print f2.pprint(margin=9999, parens=("[", " ]"))
        print
    if perbook: return
    if topfragments: print

    if conftable:
        print "Confusion matrix"
        ref = [a for a in results for b in results[a]]
        test = [getauthor(b) for a in results for b in results[a]]
        cf = ConfusionMatrix(ref, test)
        print '\t\t&%s\\\\' % "\t& ".join(sorted(set(test)))
        for a in sorted(set(ref)):
            print a.ljust(15),
            for b in sorted(set(test)):
                c = "& "
                if a == b: c = ("& \\textbf{%d}" % cf[a, b])
                elif cf[a, b]: c = ("& %d" % cf[a, b])
                print c.rjust(10),
            print r"\\"
        print

    avg = sum(1 for a in results
              for b in results[a] if a == getauthor(b)) / float(
                  sum(map(len, results.values())))
    if breakdown:
        print "Accuracy"
        z = []
        for a in sorted(results):
            acc = sum(1 for b in results[a] if a == getauthor(b)) / float(
                len(results[a]))
            print getauthor(a).ljust(16), "&   ",
            print "%.2f \\%% \\\\" % (100 * acc)
            z.append(acc)
        print "macro average:".ljust(
            16), "&   %6.2f \\%% \\\\" % (100 * sum(z) / float(len(z)))
        print "micro average:".ljust(16), "&   %6.2f \\%% \\\\" % (100 * avg)
    else:
        print "average:".ljust(16), "&   %6.2f \\%% \\\\" % (100 * avg)