Example #1
0
def write_score(name, gold_labels, pred_scores, classes, average_classes):
    """Write classification metrics to '<name>.txt' and ROC curves to '<name>.pdf'.

    name: output file basename (no extension).
    gold_labels: ground-truth label per sample.
    pred_scores: (n_samples, n_classes) score array whose columns follow
        the order of `classes`.
    classes: full list of class labels.
    average_classes: subset of `classes` whose F1 scores are averaged for
        the summary line.
    """
    classes, average_classes = np.array(classes), np.array(average_classes)
    # One-hot indicator matrix of the gold labels, columns per class.
    gold_scores = LabelBinarizer().fit(classes).transform(gold_labels)
    # Predicted label = class with the highest score in each row.
    pred_labels = classes[np.argmax(pred_scores, axis=1)]

    # Tee presumably duplicates stdout into the .txt file — confirm its contract.
    with closing(Tee('{}.txt'.format(name), 'w')):
        precision, recall, fscore, _ = precision_recall_fscore_support(gold_labels, pred_labels, labels=classes)
        for t in zip(classes, precision, recall, fscore):
            print('{}: P={:.2f}, R={:.2f}, F1={:.2f}'.format(*t))
        print('Accuracy: {:.4f}'.format(accuracy_score(gold_labels, pred_labels)))
        # NOTE(review): LabelEncoder indexes into the *sorted* class order,
        # while `fscore` follows the order of `classes` as passed above —
        # these agree only when `classes` is already sorted. Verify callers.
        print('F1 average: {:.4f}'.format(np.mean(fscore[LabelEncoder().fit(classes).transform(average_classes)])))

    with PdfPages('{}.pdf'.format(name)) as pdf:
        # Per-class ROC curves plus a micro-average over all (sample, class) pairs.
        fpr = {}
        tpr = {}
        roc_auc = {}
        for i in range(len(classes)):
            fpr[i], tpr[i], _ = roc_curve(gold_scores[:, i], pred_scores[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
        # Micro-average: flatten indicator and score matrices into 1-D.
        fpr['micro'], tpr['micro'], _ = roc_curve(gold_scores.ravel(), pred_scores.ravel())
        roc_auc['micro'] = auc(fpr['micro'], tpr['micro'])
        plt.figure()
        plt.plot(fpr['micro'], tpr['micro'], label='micro-average (area = {:.2f})'.format(roc_auc['micro']))
        for i in range(len(classes)):
            plt.plot(fpr[i], tpr[i], label='{0} (area = {1:.2f})'.format(i, roc_auc[i]))
        plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC Curves')
        plt.legend(loc='lower right')
        pdf.savefig()  # append the current figure as one PDF page
Example #2
0
def write_score(name, gold_labels, pred_scores, classes, average_classes):
    """Report precision/recall/F1 and accuracy to '<name>.txt', and draw
    per-class ROC curves plus a micro-average into '<name>.pdf'."""
    classes = np.array(classes)
    average_classes = np.array(average_classes)
    binarizer = LabelBinarizer().fit(classes)
    gold_scores = binarizer.transform(gold_labels)
    pred_labels = classes[np.argmax(pred_scores, axis=1)]

    with closing(Tee('{}.txt'.format(name), 'w')):
        precision, recall, fscore, _ = precision_recall_fscore_support(
            gold_labels, pred_labels, labels=classes)
        for row in zip(classes, precision, recall, fscore):
            print('{}: P={:.2f}, R={:.2f}, F1={:.2f}'.format(*row))
        accuracy = accuracy_score(gold_labels, pred_labels)
        print('Accuracy: {:.4f}'.format(accuracy))
        avg_idx = LabelEncoder().fit(classes).transform(average_classes)
        print('F1 average: {:.4f}'.format(np.mean(fscore[avg_idx])))

    with PdfPages('{}.pdf'.format(name)) as pdf:
        fpr, tpr, roc_auc = {}, {}, {}
        for idx in range(len(classes)):
            fpr[idx], tpr[idx], _ = roc_curve(gold_scores[:, idx],
                                              pred_scores[:, idx])
            roc_auc[idx] = auc(fpr[idx], tpr[idx])
        # Micro-average ROC over the flattened indicator/score matrices.
        fpr['micro'], tpr['micro'], _ = roc_curve(gold_scores.ravel(),
                                                  pred_scores.ravel())
        roc_auc['micro'] = auc(fpr['micro'], tpr['micro'])

        plt.figure()
        plt.plot(fpr['micro'],
                 tpr['micro'],
                 label='micro-average (area = {:.2f})'.format(
                     roc_auc['micro']))
        for idx in range(len(classes)):
            plt.plot(fpr[idx],
                     tpr[idx],
                     label='{0} (area = {1:.2f})'.format(idx, roc_auc[idx]))
        plt.plot([0, 1], [0, 1], 'k--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC Curves')
        plt.legend(loc='lower right')
        pdf.savefig()
Example #3
0
def train_predict_model(classifier, train_features, train_labels,
                        test_features, test_labels):
    """Fit `classifier` on the training data and return predictions for
    `test_features`.

    classifier: an estimator exposing fit(X, y) and predict(X).
    train_features / test_features: feature matrices.
    train_labels: training labels; binarized then flattened before fitting.
    test_labels: unused here; kept so existing call sites stay valid.
    """
    # Binarize labels, then ravel to the 1-D target vector fit() expects.
    train_lab = LabelBinarizer().fit_transform(np.array(train_labels))
    classifier.fit(train_features, train_lab.ravel())
    return classifier.predict(test_features)
train_images = np.array(train_images)

# Flatten each test image into a 1-D feature vector.
test_images = np.array([sample.flatten() for sample in test_X])

from sklearn import preprocessing

# Normalize each flattened image so feature magnitudes are comparable.
train_images = preprocessing.normalize(train_images)
test_images = preprocessing.normalize(test_images)

# MLPClassifier expects 1-D label arrays.
test_Y = test_Y.ravel()
train_Y = train_Y.ravel()

# First configuration: relu activation, five hidden layers.
# (This variable was previously misnamed `solver`; the solver is always
# 'lbfgs' — what varies between runs is the activation function.)
activation = 'relu'
nn = MLPClassifier(solver='lbfgs', max_iter=100,
                   hidden_layer_sizes=(200, 300, 100, 250, 300),
                   verbose=True, activation=activation, alpha=0.0001)
nn.fit(train_images, train_Y)
print('activation = ', activation)
print(nn.score(test_images, test_Y))  # test accuracy
print(nn.score(train_images, train_Y))  # train accuracy

# Second configuration: tanh activation, three hidden layers, smaller alpha.
activation = 'tanh'
nn = MLPClassifier(solver='lbfgs', max_iter=100,
                   hidden_layer_sizes=(300, 250, 200),
                   verbose=True, activation=activation, alpha=0.00001)
nn.fit(train_images, train_Y)
print('activation = ', activation)
print(nn.score(test_images, test_Y))  # test accuracy
print(nn.score(train_images, train_Y))  # train accuracy