Example #1
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import (accuracy_score, classification_report,
                             confusion_matrix, f1_score, precision_score,
                             recall_score)

    def evaluate(self, model, input_value, target_value):
        # Binarize the raw label tuples into a (n_samples, n_labels) matrix.
        target = MultiLabelBinarizer().fit_transform(target_value)
        prediction = model.predict(input_value)

        # Confusion matrix over each sample's argmax label.
        conf = confusion_matrix(target.argmax(axis=1),
                                prediction.argmax(axis=1))
        # Per-label accuracy, one score per label column.
        acc = [accuracy_score(target[:, i], prediction[:, i])
               for i in range(target.shape[1])]
        pre = precision_score(target, prediction, average=None)
        rec = recall_score(target, prediction, average=None)
        f1 = f1_score(target, prediction, average=None)

        # print(classification_report(target, prediction,
        #                             target_names=['#GENERAL', '#FEATURE', '#PRICE',
        #                                           '#CAMERA', '#DESIGN#SCREEN'],
        #                             zero_division=1))

        return conf, acc, pre, rec, f1
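
For context, here is a minimal, hypothetical driver for evaluate(). It assumes a one-vs-rest classifier whose predict() returns a binary indicator matrix; the toy data and model below are illustrative assumptions, not part of the original code.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer

rng = np.random.default_rng(0)
X = rng.random((40, 8))                              # toy feature matrix
labels = [('#GENERAL',) if row[0] > 0.5 else ('#PRICE',) for row in X]

y = MultiLabelBinarizer().fit_transform(labels)      # (40, 2) indicator matrix
model = OneVsRestClassifier(LogisticRegression()).fit(X, y)

# The evaluate() method above would binarize `labels` the same way, call
# model.predict(X), and return the confusion matrix plus per-label scores.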
Example #2
from sklearn import metrics
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.preprocessing import MultiLabelBinarizer

    # Read gold-standard CoNLL files: one (token, tag) pair per line,
    # sentences separated by blank lines.
    ground_truth_test = read_conll('test_sentences_groundtruth.txt')
    length_ground_truth_test = len(ground_truth_test)
    print("Number of test sentences: ", length_ground_truth_test)
    words_groundtruth_test = [[c[0] for c in x] for x in ground_truth_test]
    groundtruth_label_test = [[c[1] for c in x] for x in ground_truth_test]

    ground_truth_train = read_conll('train_sentences_groundtruth.txt')
    length_ground_truth_train = len(ground_truth_train)
    print("Number of training sentences: ", length_ground_truth_train)
    words_groundtruth_train = [[c[0] for c in x] for x in ground_truth_train]
    groundtruth_label_train = [[c[1] for c in x] for x in ground_truth_train]

    # Binarize the gold test labels built above, then reuse the same fitted
    # binarizer for the predictions so both matrices share the same columns.
    # label_aggregation (the aggregated predicted tag sequences) is assumed
    # to be defined earlier in the pipeline.
    mlb = MultiLabelBinarizer()
    y_true_total = mlb.fit_transform(groundtruth_label_test)
    y_pred_total = mlb.transform(label_aggregation)
    print(
        metrics.confusion_matrix(y_true_total.argmax(axis=1),
                                 y_pred_total.argmax(axis=1)))
    print(
        metrics.classification_report(y_true_total,
                                      y_pred_total,
                                      digits=3,
                                      labels=[0, 1, 2, 3, 4, 5, 6, 7]))

    pr_test = y_pred_total
    yh = y_true_total

    # score() is a helper defined elsewhere; it flattens both matrices into
    # 1-D arrays (see the sketch below).
    fyh, fpr = score(yh, pr_test)
    print("Testing accuracy: " + str(accuracy_score(fyh, fpr)))
    print("Testing confusion matrix:")
    print(confusion_matrix(fyh, fpr))
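
Two helpers used above, read_conll() and score(), are not shown in the snippet. The minimal sketches below are assumptions reconstructed from how they are called: read_conll() is assumed to yield one list of (token, tag) pairs per sentence, and score() is assumed to flatten the gold and predicted indicator matrices for accuracy_score and confusion_matrix.

def read_conll(path):
    """Assumed reader: 'token tag' per line; blank lines split sentences."""
    sentences, current = [], []
    with open(path, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                parts = line.split()
                current.append((parts[0], parts[-1]))
            elif current:
                sentences.append(current)
                current = []
    if current:
        sentences.append(current)
    return sentences

def score(yh, pr):
    """Assumed scorer: flatten both indicator matrices to 1-D arrays."""
    return yh.ravel(), pr.ravel()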