Example #1
def svc(filename, parameters):

    dataset, _, _ = prepare_data(filename, normalization=False)

    np.random.shuffle(dataset)

    model = SVC(C=parameters['regularization_strength'], kernel=parameters['kernel'])
    fscores = []

    print('SVC parameters: {}'.format(parameters))

    for fold in range(folds):
        cross_val_train, cross_val_test = cross_val_split(dataset, folds, fold)

        cross_val_train_features, cross_val_train_labels = separate_features_labels(cross_val_train)
        cross_val_test_features, cross_val_test_labels = separate_features_labels(cross_val_test)

        model.fit(cross_val_train_features, cross_val_train_labels)

        cross_val_predicted_labels = model.predict(cross_val_test_features)
        fscore = f_score(cross_val_predicted_labels, cross_val_test_labels)

        fscores.append(fscore)

        print('Fold: {}. Cross-validation F-score: {}'.format(fold+1, fscore))

    avg_fscore = np.mean(fscores)

    print('Average fscore: {}'.format(avg_fscore))

    return avg_fscore, parameters
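Example #1 leans on several helpers that are not shown in the snippet: a module-level `folds` count plus `prepare_data`, `separate_features_labels`, `cross_val_split`, and `f_score`. As a hedged illustration of the splitting step only, here is one way a `cross_val_split` with this signature could be written; the implementation below is an assumption, not the original project's code.

import numpy as np

def cross_val_split(dataset, folds, fold):
    # Hypothetical helper: partition the (already shuffled) rows into
    # `folds` roughly equal chunks and hold out chunk `fold` as the test split.
    chunks = np.array_split(dataset, folds)
    cross_val_test = chunks[fold]
    cross_val_train = np.concatenate([c for i, c in enumerate(chunks) if i != fold])
    return cross_val_train, cross_val_test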
Example #2
def eval_classifier(classifier, x, y):
    y_pred = classifier.predict(x)

    conf = metrics.conf_matrix(y_pred, y)
    accuracy = metrics.accuracy(y_pred, y)
    precision = metrics.precision(y_pred, y)
    recall = metrics.recall(y_pred, y)
    f1_score = metrics.f_score(y_pred, y, beta=1)
    avg_prec = np.mean(precision)
    avg_rec = np.mean(recall)
    avg_f1 = np.mean(f1_score)

    print("Confusion Matrix: ")
    print(conf)
    print("Accuracy:")
    print(accuracy)
    print("Precision:")
    print(precision)
    print(f"Average Precision: {avg_prec}")
    print("Recall:")
    print(recall)
    print(f"Average Recall: {avg_rec}")
    print("F1_score:")
    print(f1_score)
    print(f"Average F1 Score: {avg_f1}")
Example #3
    def temporal_score(self, iou_list, video_clips, bg_class=0):
        output_scores = self.__init_out_score_dict(iou_list)

        for iou in iou_list:
            confusion_mat = Dict(fp=0, tp=0, fn=0)
            class_confusion_mat = Dict()
            for c in range(self.num_classes):
                class_confusion_mat[c] = Dict(fp=0, tp=0, fn=0)
            for video_name, clips in video_clips.items():
                for c in range(self.num_classes):
                    targets = (np.array(clips.targets) == c)
                    predictions = (np.array(clips.predictions) == c)
                    tp1, fp1, fn1 = f_score(predictions,
                                            targets,
                                            iou,
                                            bg_class=0)

                    class_confusion_mat[c].fp += fp1
                    class_confusion_mat[c].tp += tp1
                    class_confusion_mat[c].fn += fn1

                tp1, fp1, fn1 = f_score(clips.predictions,
                                        clips.targets,
                                        iou,
                                        bg_class=bg_class)

                confusion_mat.tp += tp1
                confusion_mat.fp += fp1
                confusion_mat.fn += fn1

            for c in range(self.num_classes):
                output_scores["class_{}".format(c)]["iou_{:.2f}".format(
                    iou)] = calc_f1(class_confusion_mat[c].fn,
                                    class_confusion_mat[c].fp,
                                    class_confusion_mat[c].tp)

            output_scores.overall["iou_{:.2f}".format(iou)] = calc_f1(
                confusion_mat.fn, confusion_mat.fp, confusion_mat.tp)

        return output_scores
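`temporal_score` above, and the validation routines in Examples #6 and #7 below, call `f_score(predictions, targets, iou, bg_class=...)` and accumulate segment-level TP/FP/FN counts. That signature matches the segmental overlap F-score used in temporal action-segmentation evaluation; the sketch below shows that counting scheme under that assumption and is not the project's own implementation (the helper name `segment_list` is likewise made up for the illustration).

import numpy as np

def segment_list(frame_labels, bg_class):
    # Collapse a frame-wise label sequence into (label, start, end) segments,
    # dropping segments whose label equals the background class.
    labels, starts, ends = [], [], []
    last = None
    for i, lab in enumerate(frame_labels):
        if lab != last:
            if last is not None and last != bg_class:
                ends.append(i)
            if lab != bg_class:
                labels.append(lab)
                starts.append(i)
            last = lab
    if last is not None and last != bg_class:
        ends.append(len(frame_labels))
    return labels, starts, ends

def f_score(predictions, targets, iou_threshold, bg_class=0):
    # A predicted segment counts as a true positive when its best IoU with a
    # still-unmatched ground-truth segment of the same label reaches the threshold.
    p_lab, p_start, p_end = segment_list(list(predictions), bg_class)
    t_lab, t_start, t_end = segment_list(list(targets), bg_class)
    tp, fp = 0, 0
    matched = np.zeros(len(t_lab), dtype=bool)
    for j in range(len(p_lab)):
        best_iou, best_idx = 0.0, -1
        for k in range(len(t_lab)):
            if matched[k] or p_lab[j] != t_lab[k]:
                continue
            inter = min(p_end[j], t_end[k]) - max(p_start[j], t_start[k])
            union = max(p_end[j], t_end[k]) - min(p_start[j], t_start[k])
            iou = max(inter, 0) / float(union)
            if iou > best_iou:
                best_iou, best_idx = iou, k
        if best_idx >= 0 and best_iou >= iou_threshold:
            tp += 1
            matched[best_idx] = True
        else:
            fp += 1
    fn = len(t_lab) - int(matched.sum())
    return tp, fp, fn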
Example #4
def dice_loss(gt, pr, class_weights=1., smooth=SMOOTH, per_image=True, beta=1.):
    r"""Dice loss function for imbalanced datasets:

    .. math:: L(precision, recall) = 1 - (1 + \beta^2) \frac{precision \cdot recall}
        {\beta^2 \cdot precision + recall}

    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        class_weights: 1. or list of class weights, len(weights) = C
        smooth: value to avoid division by zero
        per_image: if ``True``, metric is calculated as mean over images in batch (B),
            else over whole batch
        beta: coefficient for precision recall balance

    Returns:
        Dice loss in range [0, 1]

    """
    return 1 - f_score(gt, pr, class_weights=class_weights, smooth=smooth, per_image=per_image, beta=beta)
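The docstring above states the loss as one minus an F-beta score of soft precision and recall. For beta = 1 this is exactly the familiar Dice form 2*TP / (2*TP + FP + FN); below is a small, self-contained NumPy check of that equivalence, independent of the keras-tensor `f_score` the function actually calls.

import numpy as np

gt = np.array([1.0, 1.0, 0.0, 0.0, 1.0])   # toy ground-truth mask
pr = np.array([0.9, 0.6, 0.2, 0.1, 0.3])   # toy soft prediction

tp = np.sum(gt * pr)          # soft true positives
fp = np.sum(pr) - tp          # predicted mass outside the ground truth
fn = np.sum(gt) - tp          # ground-truth mass that was missed

precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)   # F-beta with beta = 1
dice = 2 * tp / (2 * tp + fp + fn)                    # classic Dice form

print(f1, dice, 1 - dice)   # f1 equals dice; the last value is the corresponding loss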
Example #5
    results = []
    skf = cross_validation.StratifiedKFold(y, n_folds=folds)  # legacy sklearn API; newer versions use StratifiedKFold(n_splits=folds).split(data, y)
    for train_index, test_index in skf:
        train_X, test_X = data.iloc[train_index], data.iloc[test_index]
        train_y, test_y = y.iloc[train_index], y.iloc[test_index]

        clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,
                                     max_depth=max_depth, min_samples_split=min_samples_split,
                                     n_jobs=n_processes)

        clf.fit(train_X, train_y)
        results.append(metrics.predict_table(clf, test_X, test_y))

    result = pd.concat(results)

    with open(result_path + 'Arboles/Arbol.pkl', 'wb') as output:
        pickle.dump(clf, output)

    result.to_csv(result_path + 'Predicciones/result.csv')

    matrix = metrics.confusion_matrix(result)
    matrix.to_csv(result_path + 'Metricas/soft_matrix_.csv')

    clases = matrix.columns.tolist()
    f_score = [metrics.f_score(matrix, c) for c in clases]

    with open(result_path + 'Metricas/results.txt', 'w') as f:
        f.write(str(clases) + '\n')
        f.write(str(f_score) + '\n')
Example #6
    def classValidation(self, epoch):
        self.model.eval()
        predictions = {}
        targets = {}

        tp, fp, fn = 0, 0, 0

        with torch.no_grad():
            progress_bar = tqdm(self.valLoader)

            for step, (data, masks, anomaly, category, _,
                       clipNames) in enumerate(progress_bar):

                if self.modelType == "mstcn":
                    anomaly = anomaly.view(-1)
                    clipNames = np.array(clipNames).reshape(-1).tolist()

                if torch.cuda.is_available():
                    data = data.cuda().float()
                    anomaly = anomaly.cuda().float()
                    masks = masks.cuda().float()

                outputs = self.model(data, masks)

                loss = 0
                for output in outputs:
                    loss += self.ceLoss(
                        output.transpose(2, 1).reshape(-1, 2), anomaly.long())

                outputs = outputs[-1].transpose(2, 1).reshape(-1,
                                                              2).cpu().numpy()
                anomaly = anomaly.cpu().numpy().flatten().tolist()

                if step % 10 == 0:
                    progress_bar.set_description(
                        "Val [{}]:[{}/{}] Loss: {:.2f}".format(
                            epoch, step, self.valLoader.__len__(),
                            loss.item()))

                for clipName, prediction, target in zip(
                        clipNames, outputs, anomaly):
                    predictions[clipName] = prediction
                    targets[clipName] = target

            videoClips = self.valLoader.dataset.__getVideoClips__()
            for videoName, clipList in tqdm(videoClips.items()):
                clipPredictions = []
                clipTargets = []
                for clipName in clipList:
                    clipPredictions.append(predictions[clipName])
                    clipTargets.append(targets[clipName])
                clipPredictions = np.argmax(np.array(clipPredictions), axis=1)
                utils.visualizeTemporalPredictions(clipPredictions,
                                                   clipTargets, self.expFolder,
                                                   videoName)
                tp1, fp1, fn1 = f_score(clipPredictions,
                                        clipTargets,
                                        0.1,
                                        bg_class=-1)
                tp += tp1
                fp += fp1
                fn += fn1

        f1, precision, recall = calc_f1(fn, fp, tp)
        print('F1@%0.2f-%0.2f : %.4f, TP: %.4f, FP: %.4f, FN: %.4f' %
              (0.1, 0.5, f1, tp, fp, fn))
        self.writer.add_scalar("Eval/F1_0.10-%0.2f" % 0.5, f1,
                               self.stepCounter)
        self.writer.add_scalar("Confusion/%0.2f/TP_0.10" % 0.5, tp,
                               self.stepCounter)
        self.writer.add_scalar("Confusion/%0.2f/FP_0.10" % 0.5, fp,
                               self.stepCounter)
        self.writer.add_scalar("Confusion/%0.2f/TP_0.10" % 0.5, fn,
                               self.stepCounter)

        return f1
Example #7
    def binaryValidation(self, epoch):
        self.model.eval()
        anomalyPredictions = []
        anomalyTargets = []
        predictions = {}
        targets = {}

        thresholds = [0.5, 0.75, 0.9]
        IOUs = [0.1, 0.25, 0.5]
        score = 0

        with torch.no_grad():
            progress_bar = tqdm(self.valLoader)
            for step, (data, masks, anomaly, category, _,
                       clipNames) in enumerate(progress_bar):

                if self.modelType == "tcn" or self.modelType == "mstcn" or self.modelType == "mcbtcn":
                    anomaly = anomaly.view(-1)
                    clipNames = np.array(clipNames).reshape(-1).tolist()

                if torch.cuda.is_available():
                    data = data.float().cuda()
                    anomaly = anomaly.float().cuda()
                    masks = masks.float().cuda()

                if self.modelType == "mstcn":
                    outputs = self.model(data, masks)
                    outputs = outputs[-1].view(-1)
                elif self.modelType == "mcbtcn":
                    classOutputs, binaryOutputs = self.model(data, masks)
                    outputs = binaryOutputs[-1].view(-1)
                else:
                    outputs = self.model(data)

                mask = (anomaly != self.maskValue).nonzero().squeeze().cpu()
                outputs = outputs[mask]

                clipNames = np.array(clipNames)[mask].tolist()
                anomaly = anomaly[mask]
                loss = self.mseLoss(outputs.squeeze(), anomaly)

                outputs = outputs.reshape(-1).cpu().numpy().tolist()
                anomaly = anomaly.cpu().numpy().flatten().tolist()

                anomalyTargets += anomaly
                anomalyPredictions += outputs

                if step % 10 == 0:
                    progress_bar.set_description(
                        "Val [{}]:[{}/{}] Loss: {:.2f}".format(
                            epoch, step, self.valLoader.__len__(),
                            loss.item()))

                for clipName, prediction, target in zip(
                        clipNames, outputs, anomaly):
                    if clipName not in predictions:
                        predictions[clipName] = []
                    if clipName not in targets:
                        targets[clipName] = []
                    predictions[clipName].append(prediction)
                    targets[clipName].append(target)

            videoClips = self.valLoader.dataset.__getVideoClips__()
            for iou in IOUs:
                for s, threshold in enumerate(thresholds):
                    tp, fp, fn = 0, 0, 0
                    normal = {"tp": 0, "fp": 0, "fn": 0}
                    abnormal = {"tp": 0, "fp": 0, "fn": 0}
                    for videoName, clipList in tqdm(videoClips.items()):
                        clipPredictions = []
                        clipTargets = []
                        for clipName in clipList:
                            clipPredictions.append(
                                np.mean(np.array(predictions[clipName])))
                            clipTargets.append(
                                np.mean(np.array(targets[clipName])))
                        # if "Assault010_x264" in videoName:
                        #     auc_score = sklrn.roc_auc_score(clipTargets, clipPredictions)
                        #     utils.visualizeHeatMapPredictions(clipPredictions, clipTargets, self.expFolder, videoName)
                        #     print("AUC Score of selected video: {}".format(auc_score))
                        clipPredictions = (
                            np.array(clipPredictions) >
                            threshold).astype("float32").tolist()
                        if iou == 0.25 and threshold == 0.5:
                            utils.visualizeTemporalPredictions(
                                clipPredictions, clipTargets, self.expFolder,
                                videoName)

                        tp1, fp1, fn1 = f_score(clipPredictions,
                                                clipTargets,
                                                iou,
                                                bg_class=0)
                        abnormal["tp"] += tp1
                        abnormal["fp"] += fp1
                        abnormal["fn"] += fn1

                        tp1, fp1, fn1 = f_score(clipPredictions,
                                                clipTargets,
                                                iou,
                                                bg_class=1)
                        normal["tp"] += tp1
                        normal["fp"] += fp1
                        normal["fn"] += fn1

                        if self.noNormalSegmentation:
                            tp1, fp1, fn1 = f_score(clipPredictions,
                                                    clipTargets,
                                                    iou,
                                                    bg_class=0)
                        else:
                            tp1, fp1, fn1 = f_score(clipPredictions,
                                                    clipTargets,
                                                    iou,
                                                    bg_class=-1)
                            # if "Assault010_x264" in videoName:
                            #     precision = tp1 / float(tp1 + fp1 + 1e-10)
                            #     recall = tp1 / float(tp1 + fn1 + 1e-10)
                            #     f1 = 2.0 * (precision * recall) / (precision + recall + 1e-10)
                            #     print("F1 Score of selected video: {}".format(f1))

                        tp += tp1
                        fp += fp1
                        fn += fn1

                    a_f1, a_precision, a_recall = calc_f1(
                        abnormal["fn"], abnormal["fp"], abnormal["tp"])
                    print(
                        'Abnormal F1@%0.2f-%0.2f : %.4f, Precision: %.4f, Recall: %.4f'
                        % (iou, threshold, a_f1, a_precision * 100,
                           a_recall * 100))
                    n_f1, n_precision, n_recall = calc_f1(
                        normal["fn"], normal["fp"], normal["tp"])
                    print(
                        'Normal F1@%0.2f-%0.2f : %.4f, Precision: %.4f, Recall: %.4f'
                        % (iou, threshold, n_f1, n_precision * 100,
                           n_recall * 100))
                    f1, precision, recall = calc_f1(fn, fp, tp)
                    if iou == 0.25 and threshold == 0.5:
                        score = f1
                    print(
                        'F1@%0.2f-%0.2f : %.2f, TP: %.2f, FP: %.2f, FN: %.2f' %
                        (iou, threshold, f1, tp, fp, fn))
                    # print('Precision@%0.2f-%0.2f : %.2f, Recall@%0.2f-%0.2f: %.2f' % (iou, threshold, precision * 100,
                    #                                                                   iou, threshold, recall * 100))

                    if self.writer is not None:
                        self.writer.add_scalar(
                            "Eval/F1_%0.2f-%0.2f" % (iou, threshold), f1,
                            self.stepCounter)
                        self.writer.add_scalar(
                            "Confusion/TP_%0.2f-%0.2f" % (iou, threshold), tp,
                            self.stepCounter)
                        self.writer.add_scalar(
                            "Confusion/FP_%0.2f-%0.2f" % (iou, threshold), fp,
                            self.stepCounter)
                        self.writer.add_scalar(
                            "Confusion/FN_%0.2f-%0.2f" % (iou, threshold), fn,
                            self.stepCounter)

        fpr, tpr, _ = sklrn.roc_curve(anomalyTargets, anomalyPredictions)
        rocAUC = sklrn.auc(fpr, tpr)
        if self.writer is not None:
            self.writer.add_scalar("Eval/AUC", rocAUC, self.stepCounter)
        print('AUC Score %0.2f' % (rocAUC * 100))

        return score
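Both validation routines above (and `temporal_score` in Example #3) reduce their accumulated counts with `calc_f1(fn, fp, tp)`, whose body is not shown. The commented-out lines inside `binaryValidation` spell out the intended arithmetic, so a sketch consistent with them looks as follows; the epsilon guard and the (f1, precision, recall) return order come from the calling code, the rest is an assumption.

def calc_f1(fn, fp, tp):
    # Precision, recall and F1 with a small epsilon to avoid division by zero,
    # mirroring the commented-out formula in binaryValidation above.
    precision = tp / float(tp + fp + 1e-10)
    recall = tp / float(tp + fn + 1e-10)
    f1 = 2.0 * precision * recall / (precision + recall + 1e-10)
    return f1, precision, recall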
Example #8
    x_values = {clase: [] for clase in clases}
    valores_fscore = {clase: [] for clase in clases}

    result = result.sort_values('trust', axis=0)

    for i in range(100):
        
        # Keep only the predictions whose confidence exceeds the current threshold
        trust_threshold = float(i)/100
        result = result[result['trust'] > trust_threshold]

        # matrix = metrics.hard_matrix(result)
        matrix = metrics.confusion_matrix(result)

        # An f_score below zero means no data exceeded this confidence level
        f_scores = {clase: metrics.f_score(matrix, clase) for clase in clases}

        for clase in clases:
            if f_scores[clase] >= 0:
                valores_fscore[clase].append(f_scores[clase])
                x_values[clase].append(trust_threshold)

    for clase in clases:
        x_list = x_values[clase]
        y_list = valores_fscore[clase]
        
        plt.figure(clase)
        plt.plot(x_list, y_list, '-ob')

        plt.ylim(0.0, 1.0)
        plt.xlim(0.0, 1.0)
Example #9
                                         criterion='entropy',
                                         max_depth=14,
                                         min_samples_split=20,
                                         n_jobs=2)

            clf.fit(train_X, train_y)
            results.append(metrics.predict_table(clf, test_X, test_y))

        result = pd.concat(results)

        matrix = metrics.confusion_matrix(result)

        clases = matrix.columns.tolist()
        precisions = [metrics.precision(matrix, c) for c in clases]
        recalls = [metrics.recall(matrix, c) for c in clases]
        f_scores = [metrics.f_score(matrix, c) for c in clases]

        w_score = metrics.weighted_f_score(matrix)

        # f = open(result_dir + str(max_depth) + ' ' + str(min_samples_split) + '.txt', 'w')
        f = open(result_dir + str(p) + '.txt', 'w')

        f.write('F_score by class')
        f.write('\n')
        f.write(str(f_scores))
        f.write('\n')
        f.write('\n')
        f.write('Weighted average: ')
        f.write(str(w_score))

        f.close()
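Example #9 above, like the confidence-threshold sweeps in Examples #8, #10, #12 and #13, reads per-class scores straight off a confusion-matrix DataFrame via `metrics.f_score(matrix, clase)`. That implementation is not shown; below is a purely illustrative stand-in, assuming rows hold true classes and columns hold predictions, and returning -1 when the class has no support (which would explain the `>= 0` guards in the sweep examples).

import pandas as pd

def f_score_from_matrix(matrix, clase, beta=1.0):
    # Hypothetical reimplementation: the diagonal cell over the column sum
    # gives precision, the diagonal cell over the row sum gives recall.
    tp = matrix.loc[clase, clase]
    predicted = matrix[clase].sum()
    actual = matrix.loc[clase].sum()
    if predicted == 0 or actual == 0:
        return -1.0   # no support at this confidence threshold
    precision = tp / float(predicted)
    recall = tp / float(actual)
    if precision + recall == 0:
        return 0.0
    return (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall)

# Tiny usage check with a 2x2 matrix (rows = true classes, columns = predictions)
m = pd.DataFrame([[8, 2], [1, 9]], index=['a', 'b'], columns=['a', 'b'])
print(f_score_from_matrix(m, 'a'))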
Example #10
        # For each confidence percentage
        for i in range(100):

            # Keep only the predictions whose confidence exceeds the current threshold
            porcentaje = float(i) / 100

            aux = result[result['trust'] > porcentaje]

            # matrix = metrics.confusion_matrix(aux)
            matrix = metrics.hard_matrix(aux)

            # A precision below zero means no data exceeded this confidence level
            precision = metrics.accuracy(matrix, clase)
            if precision >= 0:
                valores_accuracy.append(precision)
                valores_recall.append(metrics.recall(matrix, clase))
                x_values.append(porcentaje)

            # An f_score below zero means no data exceeded this confidence level
            f_score = metrics.f_score(matrix, clase)
            if f_score >= 0:
                valores_fscore.append(f_score)
                x_values_fscore.append(porcentaje)

        #graf(clase, x_values, valores_accuracy, 'Accuracy')
        graf(clase, x_values, valores_recall, 'Recall')
        #graf(clase, x_values_fscore, valores_fscore, 'F-Score')
        print('a')

    plt.show()
Example #11
# Trees initialisation
tree_full = DecisionTreeClassifier()

mean_acc, std_dev_acc = \
    metrics.k_cross_val(tree_full, x_train, y_train, k=10, seed=42)

# Q3.2
print('Q3.2')
print(mean_acc, std_dev_acc)

y_pred = tree_full.predict(x_test)

test_accuracy = metrics.accuracy(y_pred, y_test)
test_precision = metrics.precision(y_pred, y_test)
test_recall = metrics.recall(y_pred, y_test)
test_f_score = metrics.f_score(y_pred, y_test)
labels, confusion_matrix = metrics.conf_matrix(y_pred, y_test)

# Q3.3
print('Q3.3')
print(labels)
print(confusion_matrix)
print('Test acc:', test_accuracy)
print('Test precision:', test_precision)
print('Test recall:', test_recall)
print('Test f1 score:', test_f_score)
print('Average test recall:', metrics.avg_recall(y_pred, y_test))
print('Average test precision:', metrics.avg_precision(y_pred, y_test))
print('Average test f1-score:', metrics.avg_f_score(y_pred, y_test))
Example #12
		# For each confidence percentage
		for i in range(100):

			# Keep only the predictions whose confidence exceeds the current threshold
			porcentaje = float(i)/100

			aux = result[result['trust'] > porcentaje]

			# matrix = metrics.confusion_matrix(aux)
			matrix = metrics.hard_matrix(aux)

			# A precision below zero means no data exceeded this confidence level
			precision = metrics.accuracy(matrix, clase)
			if precision >= 0:
				valores_accuracy.append(precision)
				valores_recall.append(metrics.recall(matrix, clase))
				x_values.append(porcentaje)

			# An f_score below zero means no data exceeded this confidence level
			f_score = metrics.f_score(matrix, clase)
			if f_score >= 0:
				valores_fscore.append(f_score)
				x_values_fscore.append(porcentaje)			

		#graf(clase, x_values, valores_accuracy, 'Accuracy')
		graf(clase, x_values, valores_recall, 'Recall')
		#graf(clase, x_values_fscore, valores_fscore, 'F-Score')
		print('a')

	plt.show()
Example #13
    x_values = {clase: [] for clase in clases}
    valores_fscore = {clase: [] for clase in clases}

    result = result.sort_values('trust', axis=0)

    for i in range(100):

        # Keep only the predictions whose confidence exceeds the current threshold
        trust_threshold = float(i) / 100
        result = result[result['trust'] > trust_threshold]

        # matrix = metrics.hard_matrix(result)
        matrix = metrics.confusion_matrix(result)

        # An f_score below zero means no data exceeded this confidence level
        f_scores = {clase: metrics.f_score(matrix, clase) for clase in clases}

        for clase in clases:
            if f_scores[clase] >= 0:
                valores_fscore[clase].append(f_scores[clase])
                x_values[clase].append(trust_threshold)

    for clase in clases:
        x_list = x_values[clase]
        y_list = valores_fscore[clase]

        plt.figure(clase)
        plt.plot(x_list, y_list, '-ob')

        plt.ylim(0.0, 1.0)
        plt.xlim(0.0, 1.0)
Example #14
# Find predictions from each fold's tree

predictions = []
for tree in trees:
    prediction = tree.predict(x_test)
    predictions.append(prediction)

# Combine the predictions and get mode

predictions = np.array(predictions)
mode = stats.mode(predictions)[0].flatten()
# print('mode = ', mode)
test_accuracy = metrics.accuracy(mode, y_test)
test_precision = metrics.precision(mode, y_test)
test_recall = metrics.recall(mode, y_test)
test_f_score = metrics.f_score(mode, y_test)
labels, confusion_matrix = metrics.conf_matrix(mode, y_test)

print('Q3.4')
print(labels)
print(confusion_matrix)
print('Test acc:', test_accuracy)
print('Test precision:', test_precision)
print('Test recall:', test_recall)
print('Test f1 score:', test_f_score)
print('Average test recall:', metrics.avg_recall(mode, y_test))
print('Average test precision:', metrics.avg_precision(mode, y_test))
print('Average test f1-score:', metrics.avg_f_score(mode, y_test))

print('mode accuracy = ', metrics.accuracy(mode, y_test))