Example #1
def report_classification(y_true, y_score, n_cls, title):
    print(title)
    y_pred = np.argmax(y_score, axis=-1)
    print(classification_report(y_true, y_pred, digits=5))
    plot_roc_curve(title, to_categorical(y_true, n_cls), y_score, n_cls)
    cm = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(cm, title, np.unique(y_true))
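
plot_roc_curve and plot_confusion_matrix above are project-local helpers, not scikit-learn functions. A minimal sketch of what a plot_confusion_matrix(cm, title, classes) helper could look like, with the signature inferred from the call above (this is an assumption, not the original project's code):

import numpy as np
import matplotlib.pyplot as plt


def plot_confusion_matrix(cm, title, classes):
    # Render the confusion matrix as a heatmap with per-cell counts.
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    fig.colorbar(im, ax=ax)
    ax.set(xticks=np.arange(len(classes)), yticks=np.arange(len(classes)),
           xticklabels=classes, yticklabels=classes,
           xlabel='Predicted label', ylabel='True label', title=title)
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], 'd'),
                    ha='center', va='center',
                    color='white' if cm[i, j] > thresh else 'black')
    fig.tight_layout()
    plt.show()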
Example #2
def classifyDataset(setIn, setOut, center, distanceFunc, fileName, imgTitle,
                    metric):
    # measure performance by creating confusion matrix
    correctClassifications = [0] * 10
    confusionMatrix = np.zeros([10, 10])

    for i in range(0, len(setIn)):
        actualNumber = int(setOut[i])
        recognizedNumber = classify(setIn[i], center, distanceFunc)
        confusionMatrix[actualNumber][recognizedNumber] += 1
        if (actualNumber == recognizedNumber):
            correctClassifications[actualNumber] += 1

    # total number of correctly classified samples across all ten digits
    correct = sum(correctClassifications)

    print("correctly classified with " + metric + ": " + str(correct),
          "accuracy:", correct / len(setIn))

    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(confusionMatrix,
                          classes=range(0, 10),
                          title=imgTitle)
    plt.savefig(fileName)
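
classify is a project helper; judging by its arguments it looks like a nearest-centroid classifier. A minimal sketch under that assumption (center is assumed to hold one centroid vector per digit):

import numpy as np


def classify(sample, center, distanceFunc):
    # Nearest-centroid decision: return the digit whose centroid is closest
    # to the sample under the supplied distance function.
    distances = [distanceFunc(sample, center[digit]) for digit in range(len(center))]
    return int(np.argmin(distances))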
Example #3
def svm_report(rpt: Report, y_true, y_pred, n_cls, title):
    print(classification_report(y_true, y_pred, digits=5))
    cm = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(cm, title, np.unique(y_true))
    accu = accuracy_score(y_true, y_pred)
    rpt.write_accuracy(accu).flush()
    result = precision_recall_fscore_support(y_true,
                                             y_pred,
                                             average='weighted')
    rpt.write_precision_recall_f1score(result[0], result[1], result[2])
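
Report is a project-specific class; the chained rpt.write_accuracy(accu).flush() suggests that each write_* method returns self. A minimal sketch under that assumption, covering the methods used here and in Example #4 (the output format is purely illustrative):

class Report:
    def __init__(self, path):
        self._fh = open(path, 'a')

    def write_accuracy(self, accuracy):
        self._fh.write('accuracy: {:.5f}\n'.format(accuracy))
        return self  # return self so calls can be chained with .flush()

    def write_top_k_accuracy(self, score, k):
        self._fh.write('top-{} accuracy: {:.5f}\n'.format(k, score))
        return self

    def write_precision_recall_f1score(self, precision, recall, f1):
        self._fh.write('precision: {:.5f}  recall: {:.5f}  f1: {:.5f}\n'.format(
            precision, recall, f1))
        return self

    def flush(self):
        self._fh.flush()
        return self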
Example #4
def clf_report(rpt: Report, y_true, y_score, n_cls, title):
    y_pred = np.argmax(y_score, axis=-1)
    print(classification_report(y_true, y_pred, digits=5))
    plot_roc_curve(title, to_categorical(y_true, n_cls), y_score, n_cls)
    cm = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(cm, title, np.unique(y_true))
    for k in TOP_K_ACCU:
        score = top_k_accuracy(y_score, y_true, k)
        rpt.write_top_k_accuracy(score, k).flush()
    result = precision_recall_fscore_support(y_true,
                                             y_pred,
                                             average='weighted')
    rpt.write_precision_recall_f1score(result[0], result[1], result[2])
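
top_k_accuracy and TOP_K_ACCU come from the surrounding project. A minimal sketch of the metric, assuming y_score is a 2-D array of per-class scores and the argument order (y_score, y_true, k) seen in the loop above (the k values are a hypothetical choice):

import numpy as np

TOP_K_ACCU = (1, 3, 5)  # hypothetical choice of k values


def top_k_accuracy(y_score, y_true, k):
    # Fraction of samples whose true class is among the k highest-scoring classes.
    top_k = np.argsort(y_score, axis=-1)[:, -k:]
    hits = [y_true[i] in top_k[i] for i in range(len(y_true))]
    return float(np.mean(hits))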
Example #5
def testWith(setIn, setOut, weights, bias):
    total = 0
    correct = 0
    confusionMatrix = np.zeros((10, 10))
    for i in range(0, len(setIn)):
        total += 1
        # We just use the maximum value of the classification vector as the identified digit
        recognized = maxIndex(classify(setIn[i], weights, bias))

        confusionMatrix[int(setOut[i])][int(recognized)] += 1
        if (recognized == setOut[i]):
            correct += 1

    plt.figure()
    plot_confusion_matrix(
        confusionMatrix,
        range(0, 10),
        title="Confusion matrix for test set classification (multiclass perceptron)")
    plt.savefig("cm_multi_class_perceptron.png")
    plt.show()
    print("Correct", correct, "Total", total, "Ratio:", correct / total)
Example #6
# Print & save results
torch.save(model.state_dict(), path + '/model.ckpt')

x = np.linspace(0, num_epochs, num_epochs)
plt.subplot(1, 2, 1)
plt.plot(x, trainLoss)
plt.plot(x, validLoss)

plt.subplot(1, 2, 2)
plt.plot(x, validAcc)
plt.savefig(path + '/learning_curve.png')
plt.show()

# Plotting confusion matrix
cm = confusion_matrix(ground_truth, predictions)
plot_confusion_matrix(cm.astype(np.int64), classes=categories, path=path)

# save specs
specs = {
    'Dataset': DATASET,
    'Pretrained': str(pretrained),
    'Model degree': None,
    'Num Epochs': num_epochs,
    'Batch size': batch_size,
    'Validation ratio': validationRatio,
    'ValidationTest ratio': validationTestRatio,
    'Learning rate': learning_rate,
    'Specific lr': specific_lr,
    'Device_ID': DEVICE_ID,
    'imsize': imsize,
    'Loss fct': criterion,
Example #7
true_labels = test_data.classes
class_labels = list(test_data.class_indices.keys())
#
# 3. Use scikit-learn to get statistics

report = classification_report(true_labels,
                               pred_labels,
                               target_names=class_labels)
print('Classification Report')
print(report)
print('Confusion matrix')
cnf_matrix = confusion_matrix(true_labels, pred_labels)
np.set_printoptions(precision=2)
print(cnf_matrix)

# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix,
                      classes=class_labels,
                      title='Confusion matrix, without normalization')

# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(
    cnf_matrix,
    classes=class_labels,
    normalize=True,
    title='{} model normalized confusion matrix'.format(model_name))
plt.savefig('{}.jpg'.format(trained_models_path))
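
The keyword-style plot_confusion_matrix(cm, classes=..., normalize=..., title=...) used in this and the neighbouring examples is another project-local helper. A minimal sketch under that assumed signature, in the spirit of the old scikit-learn documentation example (it draws on the current figure, so the surrounding plt.figure() / plt.savefig() calls keep working):

import itertools

import numpy as np
import matplotlib.pyplot as plt


def plot_confusion_matrix(cm, classes, normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    # Optionally normalize each row to sum to 1, then draw the heatmap.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'g'  # 'g' prints counts cleanly even for float arrays
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()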
Example #8
#
# test_steps = test_data.samples/test_data.batch_size
# predicted_classes = model.predict_generator(test_data,steps=test_steps)
# predicted_labels = np.argmax(predicted_classes, axis=1)

# 2. Get ground-truth classes and class labels

# true_labels = test_data.classes
# class_labels = list(test_data.class_indices.keys())

predicted_classes = model.predict(xtest)
predicted_labels = np.argmax(predicted_classes, axis=1)
true_labels = ytest
class_labels = ["angry","disgust","scaredq", "happy", "sad", "surprised","neutral"]      ### FER labels


# 3. Use scikit-learn to get statistics

report = classification_report(true_labels, predicted_labels, target_names=class_labels)
print('Classification Report')
print(report)
print('Confusion matrix')
cnf_matrix = confusion_matrix(true_labels, predicted_labels)
np.set_printoptions(precision=2)
print(cnf_matrix)

# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_labels, normalize=True,
                      title='%s %s model confusion matrix'%(model_name, data_base_name))
plt.savefig('./{}_cm.jpg'.format(trained_models_path))