Example #1
def test_precision(self):
    # Two-class (Y/N) case
    matrix = confusion_matrix(self.one_dimensional_result,
                              self.one_dimensional_expected)
    actual = precision(matrix)
    expected = np.array(
        [Y_AS_Y / (Y_AS_Y + N_AS_Y), N_AS_N / (Y_AS_N + N_AS_N)])
    assert (np.abs(actual - expected) < EPSILON).all()

    # Three-class (A/B/C) case
    matrix = confusion_matrix(self.multi_dimensional_result,
                              self.multi_dimensional_expected)
    actual = precision(matrix)
    expected = np.array([
        A_AS_A / (A_AS_A + B_AS_A + C_AS_A),
        B_AS_B / (A_AS_B + B_AS_B + C_AS_B),
        C_AS_C / (A_AS_C + B_AS_C + C_AS_C)
    ])
    assert (np.abs(actual - expected) < EPSILON).all()
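The implementations of precision, recall and f1_score are not shown in this listing. The following is a minimal sketch of what the test exercises, assuming the confusion matrix stores true classes along the rows and predicted classes along the columns (if the project's confusion_matrix uses the transposed convention, the two axis sums simply swap). The helper names here are hypothetical, not the project's own.

import numpy as np

def precision_sketch(matrix: np.ndarray) -> np.ndarray:
    # Per-class precision: diagonal (correct predictions) over everything
    # predicted as that class (column sums under the assumed convention).
    return np.diag(matrix) / matrix.sum(axis=0)

def recall_sketch(matrix: np.ndarray) -> np.ndarray:
    # Per-class recall: diagonal over everything that truly belongs to the
    # class (row sums under the assumed convention).
    return np.diag(matrix) / matrix.sum(axis=1)

def f1_score_sketch(matrix: np.ndarray) -> np.ndarray:
    # Per-class F1: harmonic mean of precision and recall.
    p, r = precision_sketch(matrix), recall_sketch(matrix)
    return 2 * p * r / (p + r)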
Example #2
def report_results(confusion: np.ndarray) -> None:
    """
    Report results of a training process

    :param confusion: Confusion matrix with result of training
    :return: None (prints the result measures as a markdown table)
    """
    measures = {
        "accuracy": accuracy(confusion),
        "precision": precision(confusion),
        "recall": recall(confusion),
        "f1_score": f1_score(confusion)
    }

    print("| Clases\t| *Accuracy* | *Precision* | *Recall* | *f1-score* |")
    print("| --------------- | ---------- | ----------- | -------- | ---------- |")
    # Accuracy is a single scalar, so it is only shown on the first row
    accuracy_measure = round(measures["accuracy"], 4)
    # "classes" is assumed to be a module-level list of class names,
    # ordered consistently with the confusion matrix axes
    for index, a_class in enumerate(classes):
        print("| **{name}** | {accuracy} | {precision} | {recall} | {f1_score} |".format(
            name=a_class, accuracy=accuracy_measure,
            precision=round(measures["precision"][index], 4),
            recall=round(measures["recall"][index], 4),
            f1_score=round(measures["f1_score"][index], 4))
        )
        accuracy_measure = ""
    print("\n")
Example #3
            c = "r"

        line2 = ax2.plot(costs,
                         label="MSE{}".format(iteration),
                         linestyle="--",
                         linewidth=2.5,
                         c=c)
        lines = lines + line + line2

    ax.set_ylabel("Learning Curve", fontsize=FONT_SIZE)
    ax.set_xlabel("Epochs", fontsize=FONT_SIZE)
    ax.set_title("{} Network on Iris\n".format(type_net), fontsize=TITLE_SIZE)
    ax.grid()

    ax2.set_ylabel("Cost", fontsize=FONT_SIZE)
    ax2.grid()

    labels = [l.get_label() for l in lines]
    ax2.legend(lines, labels, fontsize=FONT_SIZE, loc="center right")

    show_matrix(ax3, c_m,
                (classes, ["Predicted\n{}".format(iris) for iris in classes]),
                "Confusion Matrix of Test Set\n", FONT_SIZE, TITLE_SIZE)

    print("Accuracy:\t{}".format(accuracy(c_m)))
    print("Precision:\t{}".format(precision(c_m)))
    print("Recall:\t{}".format(recall(c_m)))
    print("f1-score:\t{}".format(f1_score(c_m)))

    plt.savefig("../results/{}_on_iris{}.png".format(filename, k_fold))
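This fragment relies on a common matplotlib idiom: curves drawn on ax and on its twinx() counterpart each return their own Line2D handles, which are concatenated so that a single legend can describe both y-axes. A self-contained sketch of just that idiom, with made-up curves, looks like this:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax2 = ax.twinx()  # second y-axis sharing the same x-axis

learning_curve = [0.4, 0.6, 0.7, 0.8, 0.85]   # illustrative values only
cost_curve = [1.2, 0.8, 0.5, 0.35, 0.3]

line = ax.plot(learning_curve, label="Learning Curve", c="b")     # list of Line2D
line2 = ax2.plot(cost_curve, label="MSE", linestyle="--", c="r")

lines = line + line2                           # concatenate the handle lists
labels = [l.get_label() for l in lines]        # recover the labels from the handles
ax2.legend(lines, labels, loc="center right")  # one legend covering both axes

plt.show()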
Example #4
def train_evaluate(architecture: dict, dataset_name: str) -> NeuralNetwork:
    """
    Train and evaluate a Network

    :param architecture: Architecture of the NeuralNetwork (defined above)
    :param dataset_name: Name of the dataset to use
    :return: Trained Neural Network
    """
    # import dataset
    dataset = import_data("../data/{}.data".format(dataset_name))

    dataset = oversample(dataset)
    more_info = "(oversample)"

    labels, encoding = one_hot_encoding(dataset[-1])
    classes = list(encoding.keys())
    # Remove the label row from the feature matrix
    dataset = np.delete(dataset, -1, 0)

    # Also drop the first attribute row (not used as an input feature)
    dataset = np.delete(dataset, [0], 0)

    # Initialize network
    logging.info("Input size: {}\tOutput size: {}".format(
        dataset.shape[0], len(encoding)))
    network = NeuralNetwork(dataset.shape[0], architecture["INTERNAL_LAYERS"],
                            len(encoding), architecture["ACT_FUNCS"],
                            architecture["LR"])

    # Define Trainer
    trainer = StandardTrainer(dataset, labels.T, TRAIN_SIZE)

    fig = plt.figure(figsize=FIG_SIZE)
    fig.subplots_adjust(wspace=0.3)
    ax = fig.add_subplot(121)
    ax2 = ax.twinx()
    ax3 = fig.add_subplot(122)

    trained, (learn, costs) = trainer.train(network,
                                            epochs=EPOCHS,
                                            repeat=True)

    prediction = trainer.evaluate(trained)

    c_m = confusion_matrix(prediction, trainer.get_labels())

    line = ax.plot(learn, label="Learning Curve", linewidth=2.5, c="b")

    line2 = ax2.plot(costs, label="MSE", linestyle="--", linewidth=2.5, c="r")
    lines = line + line2

    ax.set_ylabel("Learning Curve", fontsize=FONT_SIZE)
    ax.set_xlabel("Epochs", fontsize=FONT_SIZE)
    ax.set_title("Network on {}\n".format(dataset_name), fontsize=TITLE_SIZE)
    ax.grid()

    ax2.set_ylabel("Cost", fontsize=FONT_SIZE)
    ax2.grid()

    labels = [l.get_label() for l in lines]
    ax2.legend(lines, labels, fontsize=FONT_SIZE, loc="center right")

    show_matrix(
        ax3, c_m,
        (classes, ["Predicted\n{}".format(a_class) for a_class in classes]),
        "Confusion Matrix of Test Set\n", FONT_SIZE, TITLE_SIZE)

    measures = {
        "accuracy": accuracy(c_m),
        "precision": precision(c_m),
        "recall": recall(c_m),
        "f1_score": f1_score(c_m)
    }

    print("Summary on {}:\n".format(dataset))
    report_results(c_m)

    plt.savefig("../results/Network_on_{}{}.png".format(
        dataset_name, more_info))

    return trained
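train_evaluate leans on helpers (import_data, oversample, one_hot_encoding, StandardTrainer) whose definitions are not part of this listing. From the way its return values are used here and in Example #5, one_hot_encoding appears to map a 1-D label array to a (samples x classes) one-hot matrix plus a class-to-index dict; the sketch below only illustrates that assumed behavior and is not the project's implementation.

import numpy as np

def one_hot_encoding_sketch(values: np.ndarray):
    # Assign each distinct class a column index (np.unique sorts the classes;
    # the real helper may preserve first-appearance order instead).
    encoding = {label: index for index, label in enumerate(np.unique(values))}
    one_hot = np.zeros((len(values), len(encoding)))
    for row, label in enumerate(values):
        one_hot[row, encoding[label]] = 1.0
    return one_hot, encoding

# one_hot_encoding_sketch(np.array(["a", "b", "a"]))
# -> (array([[1., 0.], [0., 1.], [1., 0.]]), {'a': 0, 'b': 1})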
Example #5
labels, _ = one_hot_encoding(dataset)
# Random baseline: predict one of the three classes uniformly at random
prediction, _ = one_hot_encoding(
    np.random.choice(["a", "b", "c"], size=labels.shape[0], replace=True))
matrix = confusion_matrix(prediction.T, labels.T)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=FIG_SIZE)
show_matrix(ax1, matrix, ([classes[0], classes[1], classes[2]], [
    "Predicted\n" + classes[0], "Predicted\n" + classes[1],
    "Predicted\n" + classes[2]
]), "Confusion matrix of the iris dataset\n", FONT_SIZE, TITLE_SIZE)
measures = np.zeros((3, 4))
ax2.matshow(measures, cmap="Greys")
to_show = np.zeros((3, 4))
# Accuracy is a single scalar, so only the first cell of its column is used
to_show[0][0] = round(accuracy(matrix), 4)
to_show[1][0] = np.nan
to_show[2][0] = np.nan
_precision = precision(matrix)
to_show[0][1] = round(_precision[0], 4)
to_show[1][1] = round(_precision[1], 4)
to_show[2][1] = round(_precision[2], 4)
_recall = recall(matrix)
to_show[0][2] = round(_recall[0], 4)
to_show[1][2] = round(_recall[1], 4)
to_show[2][2] = round(_recall[2], 4)
_f1_score = f1_score(matrix)
to_show[0][3] = round(_f1_score[0], 4)
to_show[1][3] = round(_f1_score[1], 4)
to_show[2][3] = round(_f1_score[2], 4)
annotate(
    ax2, np.array(to_show), 25,
    np.array([[
        "Accuracy:\n", "Precision\n{}:\n".format(classes[0]),