Code example #1
import numpy as np
import keras

# Module-level flags and helpers (show_correct_distance,
# show_incorrect_distance, save_distances, reload_session_period,
# load_Data_with_imagenet_id, dist_to_boundary, reload_session,
# average_over_positive_values, wigthed_average) are assumed to be defined
# elsewhere in the module.
def distance_to_boundary_imagenet(dataset, num_classes, num_targeted_classes,
                                  num_of_samples_per_class, model_name,
                                  distance_save_dir, imagenet_path):
    distance_correct_train = np.zeros(num_targeted_classes) - 1
    distance_correct_train_std = np.zeros(num_targeted_classes) - 1
    distance_correct_test = np.zeros(num_targeted_classes) - 1
    distance_correct_test_std = np.zeros(num_targeted_classes) - 1
    distance_incorrect_train = np.zeros(num_targeted_classes) - 1
    distance_incorrect_train_std = np.zeros(num_targeted_classes) - 1
    distance_incorrect_test = np.zeros(num_targeted_classes) - 1
    distance_incorrect_test_std = np.zeros(num_targeted_classes) - 1

    correct_train_samples = np.zeros(num_targeted_classes) - 1
    correct_test_samples = np.zeros(num_targeted_classes) - 1
    incorrect_train_samples = np.zeros(num_targeted_classes) - 1
    incorrect_test_samples = np.zeros(num_targeted_classes) - 1

    # Load the target model once up front (it was previously used before being
    # defined); it is also reloaded periodically inside the loop to curb
    # Keras/TF session memory growth.
    model = keras.models.load_model(model_name)

    for j in range(num_targeted_classes):
        (x_train,
         y_train), (x_test,
                    y_test), keras_class_id = load_Data_with_imagenet_id(
                        j + 1, imagenet_path=imagenet_path)

        x_train = keras.applications.inception_v3.preprocess_input(x_train)
        x_test = keras.applications.inception_v3.preprocess_input(x_test)

        labels_train_by_model = model.predict(x_train)
        labels_test_by_model = model.predict(x_test)
        labels_train_by_model = np.argmax(labels_train_by_model, axis=1)
        labels_test_by_model = np.argmax(labels_test_by_model, axis=1)

        labels_train = y_train
        labels_test = y_test

        correctly_classified_indexes_train = labels_train_by_model == labels_train
        incorrectly_classified_indexes_train = labels_train_by_model != labels_train

        correctly_classified_indexes_test = labels_test_by_model == labels_test
        incorrectly_classified_indexes_test = labels_test_by_model != labels_test

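        # Distances are computed and reported separately for correctly and
        # incorrectly classified samples, on both member (train) and
        # non-member (test) data.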
        if show_correct_distance:
            print('Computing distance for class ', j)
            cor_class_yes_x = x_train[correctly_classified_indexes_train]
            cor_class_no_x = x_test[correctly_classified_indexes_test]

            if num_of_samples_per_class > 0:
                if cor_class_yes_x.shape[0] > num_of_samples_per_class:
                    cor_class_yes_x = cor_class_yes_x[:
                                                      num_of_samples_per_class]
                if cor_class_no_x.shape[0] > num_of_samples_per_class:
                    cor_class_no_x = cor_class_no_x[:num_of_samples_per_class]

            distance_per_sample_train = np.zeros(cor_class_yes_x.shape[0]) - 1
            distance_per_sample_test = np.zeros(cor_class_no_x.shape[0]) - 1
            for i in range(cor_class_yes_x.shape[0]):
                distance_per_sample_train[i] = dist_to_boundary(
                    cor_class_yes_x[i:i + 1], model, j, norm=2)
                if i % reload_session_period == 0:
                    model = reload_session(model_name)
                    print('Train samples progress: ', i, '/',
                          cor_class_yes_x.shape[0])
            for i in range(cor_class_no_x.shape[0]):
                distance_per_sample_test[i] = dist_to_boundary(
                    cor_class_no_x[i:i + 1], model, j, norm=2)
                if i % reload_session_period == 0:
                    model = reload_session(model_name)
                    print('Test samples progress: ', i, '/',
                          cor_class_no_x.shape[0])

            if save_distances:
                np.save(distance_save_dir + '/' + 'cor-train-' + str(j),
                        distance_per_sample_train)
                np.save(distance_save_dir + '/' + 'cor-test-' + str(j),
                        distance_per_sample_test)

            distance_per_sample_train = distance_per_sample_train[
                distance_per_sample_train != -1]
            distance_per_sample_test = distance_per_sample_test[
                distance_per_sample_test != -1]

            distance_correct_train[j], distance_correct_train_std[
                j] = average_over_positive_values(distance_per_sample_train)
            distance_correct_test[j], distance_correct_test_std[
                j] = average_over_positive_values(distance_per_sample_test)

            correct_train_samples[j] = distance_per_sample_train.shape[0]
            correct_test_samples[j] = distance_per_sample_test.shape[0]

        if show_incorrect_distance:
            print("incorrectly classified...")
            incor_class_yes_x = x_train[incorrectly_classified_indexes_train]
            incor_class_no_x = x_test[incorrectly_classified_indexes_test]

            if incor_class_yes_x.shape[0] < 10 or incor_class_no_x.shape[0] < 10:
                print(
                    "Skipping distance computation for incorrectly labeled samples due to a lack of misclassified samples!"
                )
            else:
                if num_of_samples_per_class > 0:
                    if incor_class_yes_x.shape[0] > num_of_samples_per_class:
                        incor_class_yes_x = incor_class_yes_x[:
                                                              num_of_samples_per_class]
                    if incor_class_no_x.shape[0] > num_of_samples_per_class:
                        incor_class_no_x = incor_class_no_x[:
                                                            num_of_samples_per_class]

                distance_per_sample_train = np.zeros(
                    incor_class_yes_x.shape[0]) - 1
                distance_per_sample_test = np.zeros(
                    incor_class_no_x.shape[0]) - 1
                print(distance_per_sample_train.shape,
                      distance_per_sample_test.shape)
                for i in range(incor_class_yes_x.shape[0]):
                    distance_per_sample_train[i] = dist_to_boundary(
                        incor_class_yes_x[i:i + 1], model, j, norm=2)
                    if i % reload_session_period == 0:
                        model = reload_session(model_name)
                        print('Train samples progress: ', i, '/',
                              incor_class_yes_x.shape[0])
                for i in range(incor_class_no_x.shape[0]):
                    distance_per_sample_test[i] = dist_to_boundary(
                        incor_class_no_x[i:i + 1], model, j, norm=2)
                    if i % reload_session_period == 0:
                        model = reload_session(model_name)
                        print('Test samples progress: ', i, '/',
                              incor_class_no_x.shape[0])

                if save_distances:
                    np.save(
                        distance_save_dir + '/' + 'incor-train-' + str(j),
                        distance_per_sample_train)
                    np.save(
                        distance_save_dir + '/' + 'incor-test-' + str(j),
                        distance_per_sample_test)

                distance_per_sample_train = distance_per_sample_train[
                    distance_per_sample_train != -1]
                distance_per_sample_test = distance_per_sample_test[
                    distance_per_sample_test != -1]

                distance_incorrect_train[j], distance_incorrect_train_std[
                    j] = average_over_positive_values(
                        distance_per_sample_train)
                distance_incorrect_test[j], distance_incorrect_test_std[
                    j] = average_over_positive_values(distance_per_sample_test)

                incorrect_train_samples[j] = distance_per_sample_train.shape[0]
                incorrect_test_samples[j] = distance_per_sample_test.shape[0]

        avg_correct_train = wigthed_average(distance_correct_train,
                                            correct_train_samples)
        avg_correct_train_std = wigthed_average(distance_correct_train_std,
                                                correct_train_samples)
        avg_correct_test = wigthed_average(distance_correct_test,
                                           correct_test_samples)
        avg_correct_test_std = wigthed_average(distance_correct_test_std,
                                               correct_test_samples)

        avg_incorrect_train = wigthed_average(distance_incorrect_train,
                                              incorrect_train_samples)
        avg_incorrect_train_std = wigthed_average(distance_incorrect_train_std,
                                                  incorrect_train_samples)
        avg_incorrect_test = wigthed_average(distance_incorrect_test,
                                             incorrect_test_samples)
        avg_incorrect_test_std = wigthed_average(distance_incorrect_test_std,
                                                 incorrect_test_samples)
        print("\nResults up to class ", str(j), ":")
        if show_correct_distance:
            print("Correctly labeled:")
            print(avg_correct_train, avg_correct_train_std, avg_correct_test,
                  avg_correct_test_std)

        if show_incorrect_distance:
            print("Incorrectly labeled:")
            print(avg_incorrect_train, avg_incorrect_train_std,
                  avg_incorrect_test, avg_incorrect_test_std)

    avg_correct_train = wigthed_average(distance_correct_train,
                                        correct_train_samples)
    avg_correct_train_std = wigthed_average(distance_correct_train_std,
                                            correct_train_samples)
    avg_correct_test = wigthed_average(distance_correct_test,
                                       correct_test_samples)
    avg_correct_test_std = wigthed_average(distance_correct_test_std,
                                           correct_test_samples)

    avg_incorrect_train = wigthed_average(distance_incorrect_train,
                                          incorrect_train_samples)
    avg_incorrect_train_std = wigthed_average(distance_incorrect_train_std,
                                              incorrect_train_samples)
    avg_incorrect_test = wigthed_average(distance_incorrect_test,
                                         incorrect_test_samples)
    avg_incorrect_test_std = wigthed_average(distance_incorrect_test_std,
                                             incorrect_test_samples)

    print("\n\nFinal Results:")
    if show_correct_distance:
        print(
            "Correctly labeled: [train_average train_standard_deviation test_average test_standard_deviation]"
        )
        print(avg_correct_train, avg_correct_train_std, avg_correct_test,
              avg_correct_test_std)

    if show_incorrect_distance:
        print(
            "Incorrectly labeled: [train_average train_standard_deviation test_average test_standard_deviation]"
        )
        print(avg_incorrect_train, avg_incorrect_train_std, avg_incorrect_test,
              avg_incorrect_test_std)
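These listings rely on small aggregation and session helpers that are not shown here (average_over_positive_values, wigthed_average, reload_session). The sketch below gives plausible minimal implementations matching how they are called above; the bodies are assumptions, not the repository's actual code, and wigthed_average deliberately keeps the misspelled name used at the call sites.

import gc

import numpy as np
import keras
from keras import backend as K

def average_over_positive_values(values):
    # Mean and std over the entries that survived filtering; the arrays above
    # use -1 as a "not computed" sentinel.
    valid = values[values > 0]
    if valid.shape[0] == 0:
        return -1, -1
    return np.average(valid), np.std(valid)

def wigthed_average(per_class_values, per_class_counts):
    # Per-class values weighted by per-class sample counts, skipping the -1
    # sentinel entries of classes that were not processed.
    mask = per_class_values != -1
    if not np.any(mask) or np.sum(per_class_counts[mask]) <= 0:
        return -1
    return np.average(per_class_values[mask], weights=per_class_counts[mask])

def reload_session(model_name):
    # Clearing the Keras/TF session every reload_session_period samples keeps
    # memory usage flat when graph ops are repeatedly built during the search.
    K.clear_session()
    gc.collect()
    return keras.models.load_model(model_name)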
Code example #2
import numpy as np
import keras
from keras.datasets import mnist, cifar10, cifar100

# Module-level flags and helpers (show_correct_distance,
# show_incorrect_distance, save_distances, reload_session_period,
# dist_to_boundary, reload_session, average_over_positive_values,
# wigthed_average) are assumed to be defined elsewhere in the module.
def distance_to_boundary(dataset, num_classes, num_targeted_classes, num_of_samples_per_class, model_name, distance_save_dir):
    if dataset == "mnist":
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    elif dataset == "cifar_10":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    else:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    train_size = x_train.shape[0]
    test_size = x_test.shape[0]

    print(model_name)
    model = keras.models.load_model(model_name)

    confidence_train = model.predict(x_train)
    confidence_test = model.predict(x_test)
    labels_train_by_model = np.argmax(confidence_train, axis=1)
    labels_test_by_model = np.argmax(confidence_test, axis=1)
    labels_train = np.argmax(y_train, axis=1)
    labels_test = np.argmax(y_test, axis=1)

    correctly_classified_indexes_train = labels_train_by_model == labels_train
    incorrectly_classified_indexes_train = labels_train_by_model != labels_train

    correctly_classified_indexes_test = labels_test_by_model == labels_test
    incorrectly_classified_indexes_test = labels_test_by_model != labels_test

    distance_correct_train = np.zeros(num_targeted_classes) - 1
    distance_correct_train_std = np.zeros(num_targeted_classes) - 1
    distance_correct_test = np.zeros(num_targeted_classes) - 1
    distance_correct_test_std = np.zeros(num_targeted_classes) - 1
    distance_incorrect_train = np.zeros(num_targeted_classes) - 1
    distance_incorrect_train_std = np.zeros(num_targeted_classes) - 1
    distance_incorrect_test = np.zeros(num_targeted_classes) - 1
    distance_incorrect_test_std = np.zeros(num_targeted_classes) - 1

    correct_train_samples = np.zeros(num_targeted_classes) - 1
    correct_test_samples = np.zeros(num_targeted_classes) - 1
    incorrect_train_samples = np.zeros(num_targeted_classes) - 1
    incorrect_test_samples = np.zeros(num_targeted_classes) - 1

    for j in range(1, num_targeted_classes):

        if show_correct_distance:
            print('Computing distance for class ', j)
            correctly_classified_indexes_train_of_this_class = np.logical_and(correctly_classified_indexes_train, labels_train == j)
            correctly_classified_indexes_test_of_this_class = np.logical_and(correctly_classified_indexes_test, labels_test == j)
            cor_class_yes_x = x_train[correctly_classified_indexes_train_of_this_class]
            cor_class_no_x = x_test[correctly_classified_indexes_test_of_this_class]

            if num_of_samples_per_class > 0:
                if cor_class_yes_x.shape[0] > num_of_samples_per_class:
                    cor_class_yes_x = cor_class_yes_x[:num_of_samples_per_class]
                if cor_class_no_x.shape[0] > num_of_samples_per_class:
                    cor_class_no_x = cor_class_no_x[:num_of_samples_per_class]

            distance_per_sample_train = np.zeros(cor_class_yes_x.shape[0]) - 1
            distance_per_sample_test = np.zeros(cor_class_no_x.shape[0]) - 1
            for i in range(cor_class_yes_x.shape[0]):
                distance_per_sample_train[i] = dist_to_boundary(cor_class_yes_x[i:i+1], model, j, norm=2, num_classes=num_classes)
                if i % reload_session_period == 0:
                    model = reload_session(model_name)
                    print('Train samples progress: ', i, '/', cor_class_yes_x.shape[0])
            for i in range(cor_class_no_x.shape[0]):
                distance_per_sample_test[i] = dist_to_boundary(cor_class_no_x[i:i+1], model, j, norm=2, num_classes=num_classes)
                if i % reload_session_period == 0:
                    model = reload_session(model_name)
                    print('Test samples progress: ', i, '/', cor_class_no_x.shape[0])

            if save_distances:
                np.save(distance_save_dir + '/' + model_name.split('/')[-1] + '-cor-train-' + str(j), distance_per_sample_train)
                np.save(distance_save_dir + '/' + model_name.split('/')[-1] + '-cor-test-' + str(j), distance_per_sample_test)

            distance_per_sample_train = distance_per_sample_train[distance_per_sample_train != -1]
            distance_per_sample_test = distance_per_sample_test[distance_per_sample_test != -1]

            distance_correct_train[j], distance_correct_train_std[j] = average_over_positive_values(distance_per_sample_train)
            distance_correct_test[j], distance_correct_test_std[j] = average_over_positive_values(distance_per_sample_test)

            correct_train_samples[j] = distance_per_sample_train.shape[0]
            correct_test_samples[j] = distance_per_sample_test.shape[0]


        if show_incorrect_distance:
            print("incorrectly classified...")
            incorrectly_classified_indexes_train_of_this_class = np.logical_and(incorrectly_classified_indexes_train, labels_train == j)
            incorrectly_classified_indexes_test_of_this_class = np.logical_and(incorrectly_classified_indexes_test, labels_test == j)
            incor_class_yes_x = x_train[incorrectly_classified_indexes_train_of_this_class]
            incor_class_no_x = x_test[incorrectly_classified_indexes_test_of_this_class]

            if incor_class_yes_x.shape[0] < 10 or incor_class_no_x.shape[0] < 10:
                print("Skipping distance computation for incorrectly labeled samples due to a lack of misclassified samples!")
            else:
                if num_of_samples_per_class > 0:
                    if incor_class_yes_x.shape[0] > num_of_samples_per_class:
                        incor_class_yes_x = incor_class_yes_x[:num_of_samples_per_class]
                    if incor_class_no_x.shape[0] > num_of_samples_per_class:
                        incor_class_no_x = incor_class_no_x[:num_of_samples_per_class]

                distance_per_sample_train = np.zeros(incor_class_yes_x.shape[0]) - 1
                distance_per_sample_test = np.zeros(incor_class_no_x.shape[0]) - 1
                print(distance_per_sample_train.shape, distance_per_sample_test.shape)
                for i in range(incor_class_yes_x.shape[0]):
                    distance_per_sample_train[i] = dist_to_boundary(incor_class_yes_x[i:i+1], model, j, norm=2, num_classes=num_classes)
                    if i % reload_session_period == 0:
                        model = reload_session(model_name)
                        print('Train samples progress: ', i, '/', incor_class_yes_x.shape[0])
                for i in range(incor_class_no_x.shape[0]):
                    distance_per_sample_test[i] = dist_to_boundary(incor_class_no_x[i:i+1], model, j, norm=2, num_classes=num_classes)
                    if i % reload_session_period == 0:
                        model = reload_session(model_name)
                        print('Test samples progress: ', i, '/', incor_class_no_x.shape[0])

                if save_distances:
                    np.save(distance_save_dir + '/' + model_name.split('/')[-1] + '-incor-train-' + str(j), distance_per_sample_train)
                    np.save(distance_save_dir + '/' + model_name.split('/')[-1] + '-incor-test-' + str(j), distance_per_sample_test)

                distance_per_sample_train = distance_per_sample_train[distance_per_sample_train != -1]
                distance_per_sample_test = distance_per_sample_test[distance_per_sample_test != -1]

                distance_incorrect_train[j], distance_incorrect_train_std[j] = average_over_positive_values(distance_per_sample_train)
                distance_incorrect_test[j], distance_incorrect_test_std[j] = average_over_positive_values(distance_per_sample_test)

                incorrect_train_samples[j] = distance_per_sample_train.shape[0]
                incorrect_test_samples[j] = distance_per_sample_test.shape[0]

        avg_correct_train = wigthed_average(distance_correct_train, correct_train_samples)
        avg_correct_train_std = wigthed_average(distance_correct_train_std, correct_train_samples)
        avg_correct_test = wigthed_average(distance_correct_test, correct_test_samples)
        avg_correct_test_std = wigthed_average(distance_correct_test_std, correct_test_samples)

        avg_incorrect_train = wigthed_average(distance_incorrect_train, incorrect_train_samples)
        avg_incorrect_train_std = wigthed_average(distance_incorrect_train_std, incorrect_train_samples)
        avg_incorrect_test = wigthed_average(distance_incorrect_test, incorrect_test_samples)
        avg_incorrect_test_std = wigthed_average(distance_incorrect_test_std, incorrect_test_samples)
        print("\nResults up to class ", str(j), ":")
        if show_correct_distance:
            print("Correctly labeled:")
            print(avg_correct_train, avg_correct_train_std, avg_correct_test, avg_correct_test_std)

        if show_incorrect_distance:
            print("Incorrectly labeled:")
            print(avg_incorrect_train, avg_incorrect_train_std, avg_incorrect_test, avg_incorrect_test_std)


    avg_correct_train = wigthed_average(distance_correct_train, correct_train_samples)
    avg_correct_train_std = wigthed_average(distance_correct_train_std, correct_train_samples)
    avg_correct_test = wigthed_average(distance_correct_test, correct_test_samples)
    avg_correct_test_std = wigthed_average(distance_correct_test_std, correct_test_samples)

    avg_incorrect_train = wigthed_average(distance_incorrect_train, incorrect_train_samples)
    avg_incorrect_train_std = wigthed_average(distance_incorrect_train_std, incorrect_train_samples)
    avg_incorrect_test = wigthed_average(distance_incorrect_test, incorrect_test_samples)
    avg_incorrect_test_std = wigthed_average(distance_incorrect_test_std, incorrect_test_samples)

    print("\n\nFinal Results:")
    if show_correct_distance:
        print("Correctly labeled: [train_average train_standard_deviation test_average test_standard_deviation]")
        print(avg_correct_train, avg_correct_train_std, avg_correct_test, avg_correct_test_std)

    if show_incorrect_distance:
        print("Incorrectly labeled: [train_average train_standard_deviation test_average test_standard_deviation]")
        print(avg_incorrect_train, avg_incorrect_train_std, avg_incorrect_test, avg_incorrect_test_std)
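The central helper dist_to_boundary is also external to these listings; the repository presumably estimates the distance with an adversarial-perturbation search (e.g., a Carlini-Wagner style attack). As a much cruder but self-contained stand-in, the sketch below bisects along random directions until the predicted label flips. The signature mirrors the call sites above; the direction count, search radius, and tolerance are assumptions.

import numpy as np

def dist_to_boundary(x, model, target_class, norm=2, num_classes=None,
                     num_directions=10, max_radius=10.0, tol=1e-3):
    # Crude L2 distance-to-boundary estimate for a single sample x of shape
    # (1, H, W, C): bisect along random unit directions until the model's
    # prediction changes, and keep the smallest flip radius found.
    # target_class and num_classes are accepted to match the call sites but
    # are unused in this sketch, which only supports norm=2.
    base_label = np.argmax(model.predict(x), axis=1)[0]
    best = -1.0
    for _ in range(num_directions):
        d = np.random.normal(size=x.shape)
        d /= np.linalg.norm(d)  # unit L2 direction
        if np.argmax(model.predict(x + max_radius * d), axis=1)[0] == base_label:
            continue  # no label flip within max_radius along this direction
        lo, hi = 0.0, max_radius
        while hi - lo > tol:
            mid = (lo + hi) / 2.0
            if np.argmax(model.predict(x + mid * d), axis=1)[0] == base_label:
                lo = mid
            else:
                hi = mid
        best = hi if best < 0 else min(best, hi)
    return best  # -1 signals that no boundary was found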
Code example #3
import numpy as np
import keras
import matplotlib.pyplot as plt
from keras.datasets import mnist, cifar10, cifar100
from keras.models import Sequential
from keras.layers import Dense
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV, RepeatedStratifiedKFold
from sklearn.metrics import (balanced_accuracy_score, precision_score,
                             recall_score, f1_score)
from xgboost import XGBClassifier

# Module-level flags and helpers (show_MI_attack, show_blind_attack,
# average_over_positive_values, average_over_positive_values_of_2d_array)
# are assumed to be defined elsewhere in the module.
def conf_based_attack(dataset, attack_classifier, sampling,
                      what_portion_of_samples_attacker_knows,
                      save_confidence_histogram,
                      show_MI_attack_separate_result, num_classes,
                      num_targeted_classes, model_name, verbose):
    if dataset == "mnist":
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    elif dataset == "cifar_10":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    else:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    model = keras.models.load_model(model_name)

    train_stat = model.evaluate(x_train, y_train, verbose=0)
    test_stat = model.evaluate(x_test, y_test, verbose=0)

    acc_train = train_stat[1]
    loss_train = train_stat[0]

    acc_test = test_stat[1]
    loss_test = test_stat[0]

    print(acc_train, acc_test)

    confidence_train = model.predict(x_train)
    confidence_test = model.predict(x_test)
    labels_train_by_model = np.argmax(confidence_train, axis=1)
    labels_test_by_model = np.argmax(confidence_test, axis=1)
    labels_train = np.argmax(y_train, axis=1)
    labels_test = np.argmax(y_test, axis=1)

    temp_indexer = np.arange(confidence_train.shape[0])
    temp_all_conf_array = confidence_train[temp_indexer, labels_train]
    conf_train = np.average(temp_all_conf_array)
    conf_train_std = np.std(temp_all_conf_array)

    correctly_classified_indexes_train = labels_train_by_model == labels_train
    temp_correct_conf_array = confidence_train[
        temp_indexer[correctly_classified_indexes_train],
        labels_train[correctly_classified_indexes_train]]
    conf_train_correct_only = np.average(temp_correct_conf_array)
    conf_train_correct_only_std = np.std(temp_correct_conf_array)

    incorrectly_classified_indexes_train = labels_train_by_model != labels_train
    temp_incorrect_conf_array = confidence_train[
        temp_indexer[incorrectly_classified_indexes_train],
        labels_train_by_model[incorrectly_classified_indexes_train]]
    conf_train_incorrect_only = np.average(temp_incorrect_conf_array)
    conf_train_incorrect_only_std = np.std(temp_incorrect_conf_array)

    # Compute average confidence for test set
    temp_indexer = np.arange(confidence_test.shape[0])
    temp_all_conf_array = confidence_test[temp_indexer, labels_test]
    conf_test = np.average(temp_all_conf_array)
    conf_test_std = np.std(temp_all_conf_array)

    correctly_classified_indexes_test = labels_test_by_model == labels_test
    temp_correct_conf_array = confidence_test[
        temp_indexer[correctly_classified_indexes_test],
        labels_test[correctly_classified_indexes_test]]
    conf_test_correct_only = np.average(temp_correct_conf_array)
    conf_test_correct_only_std = np.std(temp_correct_conf_array)

    incorrectly_classified_indexes_test = labels_test_by_model != labels_test
    temp_incorrect_conf_array = confidence_test[
        temp_indexer[incorrectly_classified_indexes_test],
        labels_test_by_model[incorrectly_classified_indexes_test]]
    conf_test_incorrect_only = np.average(temp_incorrect_conf_array)
    conf_test_incorrect_only_std = np.std(temp_incorrect_conf_array)

    #To store per-class MI attack accuracy
    MI_attack_per_class = np.zeros(num_targeted_classes) - 1
    MI_attack_per_class_correctly_labeled = np.zeros(num_targeted_classes) - 1
    MI_attack_per_class_incorrectly_labeled = np.zeros(
        num_targeted_classes) - 1

    MI_attack_prec_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_prec_per_class_correctly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_prec_per_class_incorrectly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1

    MI_attack_rcal_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_correctly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_incorrectly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1

    MI_attack_f1_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_correctly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_incorrectly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1

    MI_attack_per_class_correctly_labeled_separate = np.zeros(
        num_targeted_classes) - 1
    MI_attack_prec_per_class_correctly_labeled_separate = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_correctly_labeled_separate = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_correctly_labeled_separate = np.zeros(
        (num_targeted_classes, 2)) - 1

    # To store per-class MI blind-attack accuracy: the blind attack predicts
    # member (1) if the target model classifies the sample correctly
    # (argmax equals the true class j), otherwise non-member (0)
    MI_attack_blind_per_class = np.zeros(num_targeted_classes) - 1
    MI_attack_blind_per_class_correctly_labeled = np.zeros(
        num_targeted_classes) - 1
    MI_attack_blind_per_class_incorrectly_labeled = np.zeros(
        num_targeted_classes) - 1

    MI_attack_blind_prec_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_blind_prec_per_class_correctly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_blind_prec_per_class_incorrectly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1

    MI_attack_blind_rcal_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_blind_rcal_per_class_correctly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_blind_rcal_per_class_incorrectly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1

    MI_attack_blind_f1_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_blind_f1_per_class_correctly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1
    MI_attack_blind_f1_per_class_incorrectly_labeled = np.zeros(
        (num_targeted_classes, 2)) - 1

    for j in range(num_targeted_classes):
        #Prepare the data for training and testing attack models (for all data and also correctly labeled samples)
        class_yes_x = confidence_train[tuple([labels_train == j])]
        class_no_x = confidence_test[tuple([labels_test == j])]

        if class_yes_x.shape[0] < 15 or class_no_x.shape[0] < 15:
            print("Class " + str(j) +
                  " doesn't have enough samples to train an attack model!")
            continue

        class_yes_x_correctly_labeled = correctly_classified_indexes_train[
            tuple([labels_train == j])]
        class_no_x_correctly_labeled = correctly_classified_indexes_test[tuple(
            [labels_test == j])]

        class_yes_x_incorrectly_labeled = incorrectly_classified_indexes_train[
            tuple([labels_train == j])]
        class_no_x_incorrectly_labeled = incorrectly_classified_indexes_test[
            tuple([labels_test == j])]

        if save_confidence_histogram:
            temp = class_yes_x[class_yes_x_correctly_labeled]
            temp2 = class_no_x[class_no_x_correctly_labeled]
            temp = np.average(temp, axis=0)
            temp2 = np.average(temp2, axis=0)
            plt.style.use('seaborn-deep')
            plt.plot(np.arange(num_classes), temp, 'bx', label="Train samples")
            plt.plot(np.arange(num_classes), temp2, 'go', label="Test samples")
            plt.legend()
            plt.xlabel("Class Number")
            plt.ylabel("Average Confidence")
            plt.savefig('figures/conf histogram/' + dataset + '/correct-' +
                        str(j) + '.eps',
                        bbox_inches='tight')
            plt.close()

            temp = class_yes_x[class_yes_x_incorrectly_labeled]
            temp2 = class_no_x[class_no_x_incorrectly_labeled]
            temp = np.average(temp, axis=0)
            temp2 = np.average(temp2, axis=0)
            plt.style.use('seaborn-deep')
            plt.plot(np.arange(num_classes), temp, 'bx', label="Train samples")
            plt.plot(np.arange(num_classes), temp2, 'go', label="Test samples")
            plt.legend()
            plt.xlabel("Class Number")
            plt.ylabel("Average Confidence")
            plt.savefig('figures/conf histogram/' + dataset +
                        '/misclassified-' + str(j) + '.eps',
                        bbox_inches='tight')
            plt.close()

            temp = class_yes_x[class_yes_x_correctly_labeled]
            temp2 = class_no_x[class_no_x_correctly_labeled]
            bins = np.arange(50) / 50
            plt.style.use('seaborn-deep')
            n, bins, patches = plt.hist(
                [temp[:, j], temp2[:, j]],
                bins,
                density=True,  # normalized histogram; 'normed' no longer exists in Matplotlib 3.x
                alpha=1,
                label=['Train samples', 'Test samples'])
            plt.xlabel('Model Confidence')
            plt.ylabel('Probability (%)')
            plt.legend(loc='upper left')
            plt.savefig('figures/conf histogram/' + dataset + '/' + str(j) +
                        '.eps',
                        bbox_inches='tight')
            plt.close()

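        # Split each class's member (train) and non-member (test) confidence
        # vectors into an attacker-known portion used to train the attack
        # model and a held-out portion used to evaluate it.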
        class_yes_size = int(class_yes_x.shape[0] *
                             what_portion_of_samples_attacker_knows)
        class_yes_x_train = class_yes_x[:class_yes_size]
        class_yes_y_train = np.ones(class_yes_x_train.shape[0])
        class_yes_x_test = class_yes_x[class_yes_size:]
        class_yes_y_test = np.ones(class_yes_x_test.shape[0])
        class_yes_x_correctly_labeled = class_yes_x_correctly_labeled[
            class_yes_size:]
        class_yes_x_incorrectly_labeled = class_yes_x_incorrectly_labeled[
            class_yes_size:]

        class_no_size = int(class_no_x.shape[0] *
                            what_portion_of_samples_attacker_knows)
        class_no_x_train = class_no_x[:class_no_size]
        class_no_y_train = np.zeros(class_no_x_train.shape[0])
        class_no_x_test = class_no_x[class_no_size:]
        class_no_y_test = np.zeros(class_no_x_test.shape[0])
        class_no_x_correctly_labeled = class_no_x_correctly_labeled[
            class_no_size:]
        class_no_x_incorrectly_labeled = class_no_x_incorrectly_labeled[
            class_no_size:]

        y_size = class_yes_x_train.shape[0]
        n_size = class_no_x_train.shape[0]
        if sampling == "undersampling":
            if y_size > n_size:
                class_yes_x_train = class_yes_x_train[:n_size]
                class_yes_y_train = class_yes_y_train[:n_size]
            else:
                class_no_x_train = class_no_x_train[:y_size]
                class_no_y_train = class_no_y_train[:y_size]
        elif sampling == "oversampling":
            if y_size > n_size:
                class_no_x_train = np.tile(class_no_x_train,
                                           (int(y_size / n_size), 1))
                class_no_y_train = np.zeros(class_no_x_train.shape[0])
            else:
                class_yes_x_train = np.tile(class_yes_x_train,
                                            (int(n_size / y_size), 1))
                class_yes_y_train = np.ones(class_yes_x_train.shape[0])

        print('MI attack on class ', j)
        MI_x_train = np.concatenate((class_yes_x_train, class_no_x_train),
                                    axis=0)
        MI_y_train = np.concatenate((class_yes_y_train, class_no_y_train),
                                    axis=0)
        MI_x_test = np.concatenate((class_yes_x_test, class_no_x_test), axis=0)
        MI_y_test = np.concatenate((class_yes_y_test, class_no_y_test), axis=0)
        MI_correctly_labeled_indexes = np.concatenate(
            (class_yes_x_correctly_labeled, class_no_x_correctly_labeled),
            axis=0)
        MI_incorrectly_labeled_indexes = np.concatenate(
            (class_yes_x_incorrectly_labeled, class_no_x_incorrectly_labeled),
            axis=0)

        # Prepare data to train a separate attack model on correctly
        # classified samples only
        if show_MI_attack_separate_result:
            cor_class_yes_x = confidence_train[
                correctly_classified_indexes_train]
            cor_class_no_x = confidence_test[correctly_classified_indexes_test]
            cor_class_yes_x = cor_class_yes_x[np.argmax(cor_class_yes_x,
                                                        axis=1) == j]
            cor_class_no_x = cor_class_no_x[np.argmax(cor_class_no_x, axis=1)
                                            == j]

            if cor_class_yes_x.shape[0] < 15 or cor_class_no_x.shape[0] < 15:
                print("Class " + str(j) +
                      " doesn't have enough samples to train an attack model!")
                continue

            cor_class_yes_size = int(cor_class_yes_x.shape[0] *
                                     what_portion_of_samples_attacker_knows)
            cor_class_no_size = int(cor_class_no_x.shape[0] *
                                    what_portion_of_samples_attacker_knows)

            cor_class_yes_x_train = cor_class_yes_x[:cor_class_yes_size]
            cor_class_yes_y_train = np.ones(cor_class_yes_x_train.shape[0])
            cor_class_yes_x_test = cor_class_yes_x[cor_class_yes_size:]
            cor_class_yes_y_test = np.ones(cor_class_yes_x_test.shape[0])

            cor_class_no_x_train = cor_class_no_x[:cor_class_no_size]
            cor_class_no_y_train = np.zeros(cor_class_no_x_train.shape[0])
            cor_class_no_x_test = cor_class_no_x[cor_class_no_size:]
            cor_class_no_y_test = np.zeros(cor_class_no_x_test.shape[0])

            y_size = cor_class_yes_x_train.shape[0]
            n_size = cor_class_no_x_train.shape[0]
            if sampling == "undersampling":
                if y_size > n_size:
                    cor_class_yes_x_train = cor_class_yes_x_train[:n_size]
                    cor_class_yes_y_train = cor_class_yes_y_train[:n_size]
                else:
                    cor_class_no_x_train = cor_class_no_x_train[:y_size]
                    cor_class_no_y_train = cor_class_no_y_train[:y_size]
            elif sampling == "oversampling":
                if y_size > n_size:
                    cor_class_no_x_train = np.tile(cor_class_no_x_train,
                                                   (int(y_size / n_size), 1))
                    cor_class_no_y_train = np.zeros(
                        cor_class_no_x_train.shape[0])
                else:
                    cor_class_yes_x_train = np.tile(cor_class_yes_x_train,
                                                    (int(n_size / y_size), 1))
                    cor_class_yes_y_train = np.ones(
                        cor_class_yes_x_train.shape[0])

            cor_MI_x_train = np.concatenate(
                (cor_class_yes_x_train, cor_class_no_x_train), axis=0)
            cor_MI_y_train = np.concatenate(
                (cor_class_yes_y_train, cor_class_no_y_train), axis=0)
            cor_MI_x_test = np.concatenate(
                (cor_class_yes_x_test, cor_class_no_x_test), axis=0)
            cor_MI_y_test = np.concatenate(
                (cor_class_yes_y_test, cor_class_no_y_test), axis=0)

        if show_MI_attack:
            if attack_classifier == "NN":
                # Use NN classifier to launch Membership Inference attack (All data + correctly labeled)
                attack_model = Sequential()
                attack_model.add(
                    Dense(128, input_dim=num_classes, activation='relu'))
                attack_model.add(Dense(64, activation='relu'))
                attack_model.add(Dense(1, activation='sigmoid'))
                attack_model.compile(loss='binary_crossentropy',
                                     optimizer='adam',
                                     metrics=['acc'])
                attack_model.fit(MI_x_train,
                                 MI_y_train,
                                 validation_data=(MI_x_test, MI_y_test),
                                 epochs=30,
                                 batch_size=32,
                                 verbose=False,
                                 shuffle=True)

            elif attack_classifier == "RF":
                n_est = [500, 800, 1500, 2500, 5000]
                max_f = ['auto', 'sqrt']
                max_depth = [20, 30, 40, 50]
                max_depth.append(None)
                min_samples_s = [2, 5, 10, 15, 20]
                min_samples_l = [1, 2, 5, 10, 15]
                grid_param = {
                    'n_estimators': n_est,
                    'max_features': max_f,
                    'max_depth': max_depth,
                    'min_samples_split': min_samples_s,
                    'min_samples_leaf': min_samples_l
                }
                RFR = RandomForestClassifier(random_state=1)
                RFR_random = RandomizedSearchCV(estimator=RFR,
                                                param_distributions=grid_param,
                                                n_iter=100,
                                                cv=2,
                                                verbose=1 if verbose else 0,
                                                random_state=42,
                                                n_jobs=-1)
                RFR_random.fit(MI_x_train, MI_y_train)
                if verbose:
                    print(RFR_random.best_params_)
                attack_model = RFR_random.best_estimator_

            elif attack_classifier == "XGBoost":
                temp_model = XGBClassifier()
                param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100],
                                  min_child_weight=[1, 5, 10, 15],
                                  subsample=[0.6, 0.8, 1.0],
                                  colsample_bytree=[0.6, 0.8, 1.0],
                                  max_depth=[3, 6, 9, 12])
                # param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100, 500, 1000])
                cv = RepeatedStratifiedKFold(n_splits=2,
                                             n_repeats=2,
                                             random_state=1)
                # grid = GridSearchCV(estimator=temp_model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid = RandomizedSearchCV(estimator=temp_model,
                                          param_distributions=param_grid,
                                          n_iter=50,
                                          n_jobs=-1,
                                          cv=cv,
                                          scoring='balanced_accuracy')
                grid_result = grid.fit(MI_x_train, MI_y_train)
                attack_model = grid_result.best_estimator_
                if verbose:
                    print("Best: %f using %s" %
                          (grid_result.best_score_, grid_result.best_params_))

            # MI attack accuracy on all data
            if attack_classifier == "NN":
                y_pred = attack_model.predict_classes(MI_x_test)
            else:
                y_pred = attack_model.predict(MI_x_test)
            MI_attack_per_class[j] = balanced_accuracy_score(MI_y_test, y_pred)
            MI_attack_prec_per_class[j] = precision_score(MI_y_test,
                                                          y_pred,
                                                          average=None)
            MI_attack_rcal_per_class[j] = recall_score(MI_y_test,
                                                       y_pred,
                                                       average=None)
            MI_attack_f1_per_class[j] = f1_score(MI_y_test,
                                                 y_pred,
                                                 average=None)

            # MI attack accuracy on correctly labeled
            if np.sum(MI_correctly_labeled_indexes) > 0:
                temp_x = MI_x_test[MI_correctly_labeled_indexes]
                temp_y = MI_y_test[MI_correctly_labeled_indexes]
                if attack_classifier == "NN":
                    y_pred = attack_model.predict_classes(temp_x)
                else:
                    y_pred = attack_model.predict(temp_x)
                MI_attack_per_class_correctly_labeled[
                    j] = balanced_accuracy_score(temp_y, y_pred)
                MI_attack_prec_per_class_correctly_labeled[
                    j] = precision_score(temp_y, y_pred, average=None)
                MI_attack_rcal_per_class_correctly_labeled[j] = recall_score(
                    temp_y, y_pred, average=None)
                MI_attack_f1_per_class_correctly_labeled[j] = f1_score(
                    temp_y, y_pred, average=None)

            # MI attack accuracy on incorrectly labeled
            if np.sum(MI_incorrectly_labeled_indexes) > 0:
                temp_x = MI_x_test[MI_incorrectly_labeled_indexes]
                temp_y = MI_y_test[MI_incorrectly_labeled_indexes]
                if attack_classifier == "NN":
                    y_pred = attack_model.predict_classes(temp_x)
                else:
                    y_pred = attack_model.predict(temp_x)
                MI_attack_per_class_incorrectly_labeled[
                    j] = balanced_accuracy_score(temp_y, y_pred)
                MI_attack_prec_per_class_incorrectly_labeled[
                    j] = precision_score(temp_y, y_pred, average=None)
                MI_attack_rcal_per_class_incorrectly_labeled[j] = recall_score(
                    temp_y, y_pred, average=None)
                MI_attack_f1_per_class_incorrectly_labeled[j] = f1_score(
                    temp_y, y_pred, average=None)

            if verbose:
                print('\nMI Attack (all data):')
                print('Accuracy:', MI_attack_per_class[j])
                print('Precision:', MI_attack_prec_per_class[j])
                print('Recall:', MI_attack_rcal_per_class[j])
                print('F1:', MI_attack_f1_per_class[j])
                print('\nMI Attack (correctly classified samples):')
                print('Accuracy:', MI_attack_per_class_correctly_labeled[j])
                print('Precision:',
                      MI_attack_prec_per_class_correctly_labeled[j])
                print('Recall:', MI_attack_rcal_per_class_correctly_labeled[j])
                print('F1:', MI_attack_f1_per_class_correctly_labeled[j])
                print('\nMI Attack (misclassified samples):')
                print('Accuracy:', MI_attack_per_class_incorrectly_labeled[j])
                print('Precision:',
                      MI_attack_prec_per_class_incorrectly_labeled[j])
                print('Recall:',
                      MI_attack_rcal_per_class_incorrectly_labeled[j])
                print('F1:', MI_attack_f1_per_class_incorrectly_labeled[j])

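        # The blind (naive) attack needs no trained model: it predicts
        # "member" exactly when the target model assigns the sample to its
        # true class j, i.e., when the target classifies it correctly.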
        if show_blind_attack:
            # MI_x_train_blind = MI_x_train[:, j]  # To be fair, only the test set is used here (to stay comparable with the other attacks), so this line stays commented out
            MI_x_test_blind = np.argmax(MI_x_test, axis=1)
            MI_predicted_y_test_blind = [
                1 if l == j else 0 for l in MI_x_test_blind
            ]
            MI_predicted_y_test_blind = np.array(MI_predicted_y_test_blind)

            # MI blind attack accuracy on all data
            y_pred = MI_predicted_y_test_blind
            MI_attack_blind_per_class[j] = balanced_accuracy_score(
                MI_y_test, y_pred)
            MI_attack_blind_prec_per_class[j] = precision_score(MI_y_test,
                                                                y_pred,
                                                                average=None)
            MI_attack_blind_rcal_per_class[j] = recall_score(MI_y_test,
                                                             y_pred,
                                                             average=None)
            MI_attack_blind_f1_per_class[j] = f1_score(MI_y_test,
                                                       y_pred,
                                                       average=None)

            # MI blind attack accuracy on correctly labeled
            if np.sum(MI_correctly_labeled_indexes) > 0:
                temp_y = MI_y_test[MI_correctly_labeled_indexes]
                y_pred = MI_predicted_y_test_blind[
                    MI_correctly_labeled_indexes]
                MI_attack_blind_per_class_correctly_labeled[
                    j] = balanced_accuracy_score(temp_y, y_pred)
                MI_attack_blind_prec_per_class_correctly_labeled[
                    j] = precision_score(temp_y, y_pred, average=None)
                MI_attack_blind_rcal_per_class_correctly_labeled[
                    j] = recall_score(temp_y, y_pred, average=None)
                MI_attack_blind_f1_per_class_correctly_labeled[j] = f1_score(
                    temp_y, y_pred, average=None)

            # MI blind attack accuracy on incorrectly labeled
            if np.sum(MI_incorrectly_labeled_indexes) > 0:
                temp_y = MI_y_test[MI_incorrectly_labeled_indexes]
                y_pred = MI_predicted_y_test_blind[
                    MI_incorrectly_labeled_indexes]
                MI_attack_blind_per_class_incorrectly_labeled[
                    j] = balanced_accuracy_score(temp_y, y_pred)
                MI_attack_blind_prec_per_class_incorrectly_labeled[
                    j] = precision_score(temp_y, y_pred, average=None)
                MI_attack_blind_rcal_per_class_incorrectly_labeled[
                    j] = recall_score(temp_y, y_pred, average=None)
                MI_attack_blind_f1_per_class_incorrectly_labeled[j] = f1_score(
                    temp_y, y_pred, average=None)

            if verbose:
                print('\nBlind Attack (all data):')
                print('Accuracy:', MI_attack_blind_per_class[j])
                print('Precision:', MI_attack_blind_prec_per_class[j])
                print('Recall:', MI_attack_blind_rcal_per_class[j])
                print('F1:', MI_attack_blind_f1_per_class[j])
                print('\nBlind Attack (correctly classified samples):')
                print('Accuracy:',
                      MI_attack_blind_per_class_correctly_labeled[j])
                print('Precision:',
                      MI_attack_blind_prec_per_class_correctly_labeled[j])
                print('Recall:',
                      MI_attack_blind_rcal_per_class_correctly_labeled[j])
                print('F1:', MI_attack_blind_f1_per_class_correctly_labeled[j])
                print('\nBlind Attack (misclassified samples):')
                print('Accuracy:',
                      MI_attack_blind_per_class_incorrectly_labeled[j])
                print('Precision:',
                      MI_attack_blind_prec_per_class_incorrectly_labeled[j])
                print('Recall:',
                      MI_attack_blind_rcal_per_class_incorrectly_labeled[j])
                print('F1:',
                      MI_attack_blind_f1_per_class_incorrectly_labeled[j])

        # Train a separate attack model (NN/RF/XGBoost) only on correctly
        # classified samples
        if show_MI_attack_separate_result:
            if attack_classifier == "NN":
                attack_model = Sequential()
                attack_model.add(
                    Dense(128, input_dim=num_classes, activation='relu'))
                attack_model.add(Dense(64, activation='relu'))
                attack_model.add(Dense(1, activation='sigmoid'))
                attack_model.compile(loss='binary_crossentropy',
                                     optimizer='adam',
                                     metrics=['accuracy'])
                attack_model.fit(cor_MI_x_train,
                                 cor_MI_y_train,
                                 epochs=40,
                                 batch_size=32,
                                 verbose=False)

            elif attack_classifier == "RF":
                n_est = [500, 800, 1500, 2500, 5000]
                max_f = ['auto', 'sqrt']
                max_depth = [20, 30, 40, 50]
                max_depth.append(None)
                min_samples_s = [2, 5, 10, 15, 20]
                min_samples_l = [1, 2, 5, 10, 15]
                grid_param = {
                    'n_estimators': n_est,
                    'max_features': max_f,
                    'max_depth': max_depth,
                    'min_samples_split': min_samples_s,
                    'min_samples_leaf': min_samples_l
                }
                RFR = RandomForestClassifier(random_state=1)
                RFR_random = RandomizedSearchCV(estimator=RFR,
                                                param_distributions=grid_param,
                                                n_iter=100,
                                                cv=2,
                                                verbose=1 if verbose else 0,
                                                random_state=42,
                                                n_jobs=-1)
                RFR_random.fit(cor_MI_x_train, cor_MI_y_train)
                if verbose:
                    print(RFR_random.best_params_)
                attack_model = RFR_random.best_estimator_

            elif attack_classifier == "XGBoost":
                temp_model = XGBClassifier()
                param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100],
                                  min_child_weight=[1, 5, 10, 15],
                                  subsample=[0.6, 0.8, 1.0],
                                  colsample_bytree=[0.6, 0.8, 1.0],
                                  max_depth=[3, 6, 9, 12])
                # param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100, 500, 1000])
                cv = RepeatedStratifiedKFold(n_splits=2,
                                             n_repeats=2,
                                             random_state=1)
                # grid = GridSearchCV(estimator=temp_model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid = RandomizedSearchCV(estimator=temp_model,
                                          param_distributions=param_grid,
                                          n_iter=50,
                                          n_jobs=-1,
                                          cv=cv,
                                          scoring='balanced_accuracy')
                grid_result = grid.fit(cor_MI_x_train, cor_MI_y_train)
                attack_model = grid_result.best_estimator_
                if verbose:
                    print("Best: %f using %s" %
                          (grid_result.best_score_, grid_result.best_params_))

            if attack_classifier == "NN":
                y_pred = attack_model.predict_classes(cor_MI_x_test)
            else:
                y_pred = attack_model.predict(cor_MI_x_test)

            MI_attack_per_class_correctly_labeled_separate[
                j] = balanced_accuracy_score(cor_MI_y_test, y_pred)
            MI_attack_prec_per_class_correctly_labeled_separate[
                j] = precision_score(cor_MI_y_test, y_pred, average=None)
            MI_attack_rcal_per_class_correctly_labeled_separate[
                j] = recall_score(cor_MI_y_test, y_pred, average=None)
            MI_attack_f1_per_class_correctly_labeled_separate[j] = f1_score(
                cor_MI_y_test, y_pred, average=None)
            if verbose:
                print(
                    '\nMI Attack model trained only on correctly classified samples:'
                )
                print('Accuracy:',
                      MI_attack_per_class_correctly_labeled_separate[j])
                print('Precision:',
                      MI_attack_prec_per_class_correctly_labeled_separate[j])
                print('Recall:',
                      MI_attack_rcal_per_class_correctly_labeled_separate[j])
                print('F1:',
                      MI_attack_f1_per_class_correctly_labeled_separate[j])

    if show_MI_attack:
        MI_attack, MI_attack_std = average_over_positive_values(
            MI_attack_per_class)
        MI_attack_correct_only, MI_attack_correct_only_std = average_over_positive_values(
            MI_attack_per_class_correctly_labeled)
        MI_attack_incorrect_only, MI_attack_incorrect_only_std = average_over_positive_values(
            MI_attack_per_class_incorrectly_labeled)

        MI_attack_prec, MI_attack_prec_std = average_over_positive_values_of_2d_array(
            MI_attack_prec_per_class)
        MI_attack_prec_correct_only, MI_attack_prec_correct_only_std = average_over_positive_values_of_2d_array(
            MI_attack_prec_per_class_correctly_labeled)
        MI_attack_prec_incorrect_only, MI_attack_prec_incorrect_only_std = average_over_positive_values_of_2d_array(
            MI_attack_prec_per_class_incorrectly_labeled)

        MI_attack_rcal, MI_attack_rcal_std = average_over_positive_values_of_2d_array(
            MI_attack_rcal_per_class)
        MI_attack_rcal_correct_only, MI_attack_rcal_correct_only_std = average_over_positive_values_of_2d_array(
            MI_attack_rcal_per_class_correctly_labeled)
        MI_attack_rcal_incorrect_only, MI_attack_rcal_incorrect_only_std = average_over_positive_values_of_2d_array(
            MI_attack_rcal_per_class_incorrectly_labeled)

        MI_attack_f1, MI_attack_f1_std = average_over_positive_values_of_2d_array(
            MI_attack_f1_per_class)
        MI_attack_f1_correct_only, MI_attack_f1_correct_only_std = average_over_positive_values_of_2d_array(
            MI_attack_f1_per_class_correctly_labeled)
        MI_attack_f1_incorrect_only, MI_attack_f1_incorrect_only_std = average_over_positive_values_of_2d_array(
            MI_attack_f1_per_class_incorrectly_labeled)

    if show_blind_attack:
        MI_attack_blind, MI_attack_blind_std = average_over_positive_values(
            MI_attack_blind_per_class)
        MI_attack_blind_correct_only, MI_attack_blind_correct_only_std = average_over_positive_values(
            MI_attack_blind_per_class_correctly_labeled)
        MI_attack_blind_incorrect_only, MI_attack_blind_incorrect_only_std = average_over_positive_values(
            MI_attack_blind_per_class_incorrectly_labeled)

        MI_attack_blind_prec, MI_attack_blind_prec_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_prec_per_class)
        MI_attack_blind_prec_correct_only, MI_attack_blind_prec_correct_only_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_prec_per_class_correctly_labeled)
        MI_attack_blind_prec_incorrect_only, MI_attack_blind_prec_incorrect_only_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_prec_per_class_incorrectly_labeled)

        MI_attack_blind_rcal, MI_attack_blind_rcal_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_rcal_per_class)
        MI_attack_blind_rcal_correct_only, MI_attack_blind_rcal_correct_only_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_rcal_per_class_correctly_labeled)
        MI_attack_blind_rcal_incorrect_only, MI_attack_blind_rcal_incorrect_only_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_rcal_per_class_incorrectly_labeled)

        MI_attack_blind_f1, MI_attack_blind_f1_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_f1_per_class)
        MI_attack_blind_f1_correct_only, MI_attack_blind_f1_correct_only_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_f1_per_class_correctly_labeled)
        MI_attack_blind_f1_incorrect_only, MI_attack_blind_f1_incorrect_only_std = average_over_positive_values_of_2d_array(
            MI_attack_blind_f1_per_class_incorrectly_labeled)

    if show_MI_attack_separate_result:
        MI_attack_correct_only_separate_model, MI_attack_correct_only_separate_model_std = average_over_positive_values(
            MI_attack_per_class_correctly_labeled_separate)
        MI_attack_prec_correct_only_separate_model, MI_attack_prec_correct_only_separate_model_std = average_over_positive_values_of_2d_array(
            MI_attack_prec_per_class_correctly_labeled_separate)
        MI_attack_rcal_correct_only_separate_model, MI_attack_rcal_correct_only_separate_model_std = average_over_positive_values_of_2d_array(
            MI_attack_rcal_per_class_correctly_labeled_separate)
        MI_attack_f1_correct_only_separate_model, MI_attack_f1_correct_only_separate_model_std = average_over_positive_values_of_2d_array(
            MI_attack_f1_per_class_correctly_labeled_separate)

    print("\n\n---------------------------------------")
    print("Final results:")
    print("Values are in a pair of average and standard deviation.")
    print("\nTarget model accuracy:")
    print(str(np.round(acc_train * 100, 2)), str(np.round(acc_test * 100, 2)))
    print("\nTarget model confidence:")
    print('All train data: ', str(np.round(conf_train * 100, 2)),
          str(np.round(conf_train_std * 100, 2)))
    print('All test data: ', str(np.round(conf_test * 100, 2)),
          str(np.round(conf_test_std * 100, 2)))
    print('Correctly classified train samples: ',
          str(np.round(conf_train_correct_only * 100, 2)),
          str(np.round(conf_train_correct_only_std * 100, 2)))
    print('Correctly classified test samples: ',
          str(np.round(conf_test_correct_only * 100, 2)),
          str(np.round(conf_test_correct_only_std * 100, 2)))
    print('Misclassified train samples: ',
          str(np.round(conf_train_incorrect_only * 100, 2)),
          str(np.round(conf_train_incorrect_only_std * 100, 2)))
    print('Misclassified test samples: ',
          str(np.round(conf_test_incorrect_only * 100, 2)),
          str(np.round(conf_test_incorrect_only_std * 100, 2)))

    if show_MI_attack:
        print("\n\nMI Attack accuracy:")
        print('All data: ', str(np.round(MI_attack * 100, 2)),
              str(np.round(MI_attack_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_correct_only * 100, 2)),
              str(np.round(MI_attack_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_incorrect_only * 100, 2)),
              str(np.round(MI_attack_incorrect_only_std * 100, 2)))

        print("\nMI Attack precision:")
        print('All data: ', str(np.round(MI_attack_prec * 100, 2)),
              str(np.round(MI_attack_prec_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_prec_correct_only * 100, 2)),
              str(np.round(MI_attack_prec_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_prec_incorrect_only * 100, 2)),
              str(np.round(MI_attack_prec_incorrect_only_std * 100, 2)))

        print("\nMI Attack recall:")
        print('All data: ', str(np.round(MI_attack_rcal * 100, 2)),
              str(np.round(MI_attack_rcal_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_rcal_correct_only * 100, 2)),
              str(np.round(MI_attack_rcal_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_rcal_incorrect_only * 100, 2)),
              str(np.round(MI_attack_rcal_incorrect_only_std * 100, 2)))

        print("\nMI Attack f1:")
        print('All data: ', str(np.round(MI_attack_f1 * 100, 2)),
              str(np.round(MI_attack_f1_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_f1_correct_only * 100, 2)),
              str(np.round(MI_attack_f1_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_f1_incorrect_only * 100, 2)),
              str(np.round(MI_attack_f1_incorrect_only_std * 100, 2)))

    if show_blind_attack:
        print("\n\n\nMI blind Attack accuracy:")
        print('All data: ', str(np.round(MI_attack_blind * 100, 2)),
              str(np.round(MI_attack_blind_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_blind_correct_only * 100, 2)),
              str(np.round(MI_attack_blind_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_blind_incorrect_only * 100, 2)),
              str(np.round(MI_attack_blind_incorrect_only_std * 100, 2)))

        print("\nMI blind Attack precision:")
        print('All data: ', str(np.round(MI_attack_blind_prec * 100, 2)),
              str(np.round(MI_attack_blind_prec_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_blind_prec_correct_only * 100, 2)),
              str(np.round(MI_attack_blind_prec_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_blind_prec_incorrect_only * 100, 2)),
              str(np.round(MI_attack_blind_prec_incorrect_only_std * 100, 2)))

        print("\nMI blind Attack recall:")
        print('All data: ', str(np.round(MI_attack_blind_rcal * 100, 2)),
              str(np.round(MI_attack_blind_rcal_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_blind_rcal_correct_only * 100, 2)),
              str(np.round(MI_attack_blind_rcal_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_blind_rcal_incorrect_only * 100, 2)),
              str(np.round(MI_attack_blind_rcal_incorrect_only_std * 100, 2)))

        print("\nMI blind Attack f1:")
        print('All data: ', str(np.round(MI_attack_blind_f1 * 100, 2)),
              str(np.round(MI_attack_blind_f1_std * 100, 2)))
        print('Correctly classified samples: ',
              str(np.round(MI_attack_blind_f1_correct_only * 100, 2)),
              str(np.round(MI_attack_blind_f1_correct_only_std * 100, 2)))
        print('Misclassified samples: ',
              str(np.round(MI_attack_blind_f1_incorrect_only * 100, 2)),
              str(np.round(MI_attack_blind_f1_incorrect_only_std * 100, 2)))

    if show_MI_attack_separate_result:
        print("\nMI Attack specific to correctly labeled samples:")
        print(
            'Accuracy: ',
            str(np.round(MI_attack_correct_only_separate_model * 100, 2)),
            str(np.round(MI_attack_correct_only_separate_model_std * 100, 2)))
        print(
            'Precision: ',
            str(np.round(MI_attack_prec_correct_only_separate_model * 100, 2)),
            str(
                np.round(MI_attack_prec_correct_only_separate_model_std * 100,
                         2)))
        print(
            'Recall: ',
            str(np.round(MI_attack_rcal_correct_only_separate_model * 100, 2)),
            str(
                np.round(MI_attack_rcal_correct_only_separate_model_std * 100,
                         2)))
        print(
            'F1: ',
            str(np.round(MI_attack_f1_correct_only_separate_model * 100, 2)),
            str(np.round(MI_attack_f1_correct_only_separate_model_std * 100,
                         2)))
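The per-class aggregation in these examples leans on two helpers, average_over_positive_values and average_over_positive_values_of_2d_array, that are defined elsewhere in the repository. A minimal sketch of plausible implementations, assuming the helpers skip the -1 sentinel entries (slots for classes that were never attacked) and return a (mean, std) pair:

import numpy as np

def average_over_positive_values(values):
    # Assumed behavior: drop the -1 sentinels left in unfilled slots,
    # then return the mean and standard deviation of the rest.
    filled = values[values >= 0]
    return np.mean(filled), np.std(filled)

def average_over_positive_values_of_2d_array(values):
    # Assumed behavior: same filtering for (num_classes, 2) metric arrays;
    # a row counts as filled only if none of its entries is the sentinel.
    filled = values[np.all(values >= 0, axis=1)]
    return np.mean(filled, axis=0), np.std(filled, axis=0)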
Code Example #4
                    print("No distance file is available for class " + str(j) + " (for correctly labeled samples)!")
                    continue
            else:
                train_data_file = distance_saved_directory + 'cor-train-' + str(j) + '.npy'
                test_data_file = distance_saved_directory + 'cor-test-' + str(j) + '.npy'
                if os.path.isfile(train_data_file) and os.path.isfile(test_data_file):
                    distance_per_sample_train = np.nan_to_num(np.load(train_data_file), posinf=100000, neginf=-100000)
                    distance_per_sample_test = np.nan_to_num(np.load(test_data_file), posinf=100000, neginf=-100000)
                else:
                    print("No distance file is available for class " + str(j) + " (for correctly labeled samples)!")
                    continue

            distance_per_sample_train = distance_per_sample_train[distance_per_sample_train != -1]
            distance_per_sample_test = distance_per_sample_test[distance_per_sample_test != -1]

            distance_correct_train[j], distance_correct_train_std[j] = average_over_positive_values(distance_per_sample_train)
            distance_correct_test[j], distance_correct_test_std[j] = average_over_positive_values(distance_per_sample_test)

            correct_train_samples[j] = distance_per_sample_train.shape[0]
            correct_test_samples[j] = distance_per_sample_test.shape[0]

            #print(correct_train_samples[j], correct_test_samples[j])
            #print(distance_correct_train[j], distance_correct_train_std[j])
            #print(distance_correct_test[j], distance_correct_test_std[j])

            if correct_train_samples[j] < cor_skip_threshold or correct_test_samples[j] < cor_skip_threshold:
                print("Not enough distance sammple is available for class " + str(j) + " (for correctly labeled samples)!")
            else:
                bal_acc_per_class_correctly_labeled[j], acc_per_class_correctly_labeled[j], \
                far_per_class_correctly_labeled[j], prec_per_class_correctly_labeled[j], \
                rcal_per_class_correctly_labeled[j], \
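The loader above expects per-class files named cor-train-<j>.npy and cor-test-<j>.npy under distance_saved_directory. A hypothetical sketch of the producer side, assuming the boundary-distance loop persists its per-sample arrays with np.save (the helper name and layout are illustrative, not taken from the repository):

import os
import numpy as np

def save_class_distances(distance_save_dir, j, dist_train, dist_test):
    # Hypothetical helper: write the per-class distance arrays in the
    # file layout the loading code above expects.
    os.makedirs(distance_save_dir, exist_ok=True)
    np.save(os.path.join(distance_save_dir, 'cor-train-' + str(j) + '.npy'), dist_train)
    np.save(os.path.join(distance_save_dir, 'cor-test-' + str(j) + '.npy'), dist_test)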
Code Example #5
            if os.path.isfile(train_data_file) and os.path.isfile(
                    test_data_file):
                distance_per_sample_train = np.load(train_data_file)
                distance_per_sample_test = np.load(test_data_file)
            else:
                print("No distance file is available for class " + str(j) +
                      " (for correctly labeled samples)!")
                continue

            distance_per_sample_train = distance_per_sample_train[
                distance_per_sample_train != -1]
            distance_per_sample_test = distance_per_sample_test[
                distance_per_sample_test != -1]

            distance_correct_train[j], distance_correct_train_std[
                j] = average_over_positive_values(distance_per_sample_train)
            distance_correct_test[j], distance_correct_test_std[
                j] = average_over_positive_values(distance_per_sample_test)

            correct_train_samples[j] = distance_per_sample_train.shape[0]
            correct_test_samples[j] = distance_per_sample_test.shape[0]

            #print(correct_train_samples[j], correct_test_samples[j])
            #print(distance_correct_train[j], distance_correct_train_std[j])
            #print(distance_correct_test[j], distance_correct_test_std[j])

            if correct_train_samples[
                    j] < cor_skip_threshold or correct_test_samples[
                        j] < cor_skip_threshold:
                print("Not enough distance sammple is available for class " +
                      str(j) + " (for correctly labeled samples)!")
Code Example #6
def intermediate_layer_attack(dataset, intermediate_layer, attack_classifier, sampling, what_portion_of_samples_attacker_knows, num_classes, num_targeted_classes, model_name, verbose, show_MI_attack, show_MI_attack_separate_result, show_MI_attack_separate_result_for_incorrect):

    if dataset == "mnist":
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    elif dataset == "cifar_10":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    else:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    train_size = x_train.shape[0]
    test_size = x_test.shape[0]

    print(model_name)
    model = keras.models.load_model(model_name)

    if dataset == "mnist":
        if intermediate_layer == -1:
            AV_layer_output = model.layers[-2].output
        elif intermediate_layer == -2:
            AV_layer_output = model.layers[-3].output
        elif intermediate_layer == -3:
            AV_layer_output = model.layers[-4].output
        else:
            print("Unknown intermediate layer!")
            exit()
    elif dataset == "cifar_10" or dataset == "cifar_100":
        if intermediate_layer == -1:
            AV_layer_output = model.layers[-5].output
        elif intermediate_layer == -2:
            AV_layer_output = model.layers[-6].output
        else:
            print("Unknown intermediate layer!")
            exit()
    elif dataset == "cifar_100_resnet" or dataset == "cifar_100_densenet":
        if intermediate_layer == -1:
            AV_layer_output = model.layers[-2].output
        else:
            print("Unknown intermediate layer!")
            exit()

    train_stat = model.evaluate(x_train, y_train, verbose=0)
    test_stat = model.evaluate(x_test, y_test, verbose=0)

    acc_train = train_stat[1]
    loss_train = train_stat[0]

    acc_test = test_stat[1]
    loss_test = test_stat[0]

    print(acc_train, acc_test)

    confidence_train = model.predict(x_train)
    confidence_test = model.predict(x_test)
    labels_train_by_model = np.argmax(confidence_train, axis=1)
    labels_test_by_model = np.argmax(confidence_test, axis=1)
    labels_train = np.argmax(y_train, axis=1)
    labels_test = np.argmax(y_test, axis=1)


    correctly_classified_indexes_train = labels_train_by_model == labels_train
    incorrectly_classified_indexes_train = labels_train_by_model != labels_train

    correctly_classified_indexes_test = labels_test_by_model == labels_test
    incorrectly_classified_indexes_test = labels_test_by_model != labels_test

    #To store per-class MI attack accuracy
    MI_attack_per_class = np.zeros(num_targeted_classes) - 1
    MI_attack_per_class_correctly_labeled = np.zeros(num_targeted_classes) - 1
    MI_attack_per_class_incorrectly_labeled = np.zeros(num_targeted_classes) - 1

    MI_attack_auc_per_class = np.zeros(num_targeted_classes) - 1
    MI_attack_auc_per_class_correctly_labeled = np.zeros(num_targeted_classes) - 1
    MI_attack_auc_per_class_incorrectly_labeled = np.zeros(num_targeted_classes) - 1

    MI_attack_prec_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_prec_per_class_correctly_labeled = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_prec_per_class_incorrectly_labeled = np.zeros((num_targeted_classes, 2)) - 1

    MI_attack_rcal_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_correctly_labeled = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_incorrectly_labeled = np.zeros((num_targeted_classes, 2)) - 1

    MI_attack_f1_per_class = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_correctly_labeled = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_incorrectly_labeled = np.zeros((num_targeted_classes, 2)) - 1

    MI_attack_per_class_correctly_labeled_separate = np.zeros(num_targeted_classes) - 1
    MI_attack_prec_per_class_correctly_labeled_separate = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_correctly_labeled_separate = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_correctly_labeled_separate = np.zeros((num_targeted_classes, 2)) - 1

    # Performance of the attack on its own training set, to check whether it can learn anything at all
    MI_attack_per_class_correctly_labeled_separate2 = np.zeros(num_targeted_classes) - 1
    MI_attack_prec_per_class_correctly_labeled_separate2 = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_correctly_labeled_separate2 = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_correctly_labeled_separate2 = np.zeros((num_targeted_classes, 2)) - 1

    MI_attack_per_class_incorrectly_labeled_separate = np.zeros(num_targeted_classes) - 1
    MI_attack_prec_per_class_incorrectly_labeled_separate = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_incorrectly_labeled_separate = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_incorrectly_labeled_separate = np.zeros((num_targeted_classes, 2)) - 1

    # Performance of the attack on its own training set, to check whether it can learn anything at all
    MI_attack_per_class_incorrectly_labeled_separate2 = np.zeros(num_targeted_classes) - 1
    MI_attack_prec_per_class_incorrectly_labeled_separate2 = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_rcal_per_class_incorrectly_labeled_separate2 = np.zeros((num_targeted_classes, 2)) - 1
    MI_attack_f1_per_class_incorrectly_labeled_separate2 = np.zeros((num_targeted_classes, 2)) - 1


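    # Build a feature extractor that maps each input to its activations at the
    # chosen intermediate layer; these activations become the attack features.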
    intermediate_model = keras.models.Model(inputs=model.input, outputs=AV_layer_output)
    intermediate_value_train = intermediate_model.predict(x_train)
    intermediate_value_test = intermediate_model.predict(x_test)
    attack_input_dimension = intermediate_value_train.shape[1]

    for j in range(num_targeted_classes):

        skip_attack_on_correctly_labeled = False
        skip_attack_on_incorrectly_labeled = False

        #Prepare the data for training and testing attack models (for all data and also correctly labeled samples)
        class_yes_x = intermediate_value_train[labels_train == j]
        class_no_x = intermediate_value_test[labels_test == j]

        if (class_yes_x.shape[0] < 20 or class_no_x.shape[0] < 20) and show_MI_attack:
            print("Class " + str(j) + " doesn't have enough sample for training an attack model!!")
            continue

        class_yes_x_correctly_labeled = correctly_classified_indexes_train[labels_train == j]
        class_no_x_correctly_labeled = correctly_classified_indexes_test[labels_test == j]

        class_yes_x_incorrectly_labeled = incorrectly_classified_indexes_train[labels_train == j]
        class_no_x_incorrectly_labeled = incorrectly_classified_indexes_test[labels_test == j]


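        # The attacker is assumed to know a fixed fraction of each population;
        # that fraction trains the attack model and the remainder evaluates it.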
        class_yes_size = int(class_yes_x.shape[0] * what_portion_of_samples_attacker_knows)
        class_yes_x_train = class_yes_x[:class_yes_size]
        class_yes_y_train = np.ones(class_yes_x_train.shape[0])
        class_yes_x_test = class_yes_x[class_yes_size:]
        class_yes_y_test = np.ones(class_yes_x_test.shape[0])
        class_yes_x_correctly_labeled = class_yes_x_correctly_labeled[class_yes_size:]
        class_yes_x_incorrectly_labeled = class_yes_x_incorrectly_labeled[class_yes_size:]

        class_no_size = int(class_no_x.shape[0] * what_portion_of_samples_attacker_knows)
        class_no_x_train = class_no_x[:class_no_size]
        class_no_y_train = np.zeros(class_no_x_train.shape[0])
        class_no_x_test = class_no_x[class_no_size:]
        class_no_y_test = np.zeros(class_no_x_test.shape[0])
        class_no_x_correctly_labeled = class_no_x_correctly_labeled[class_no_size:]
        class_no_x_incorrectly_labeled = class_no_x_incorrectly_labeled[class_no_size:]


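        # Balance the member (yes) and non-member (no) attack-training sets:
        # undersampling truncates the larger side, oversampling tiles the smaller.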
        y_size = class_yes_x_train.shape[0]
        n_size = class_no_x_train.shape[0]
        if sampling == "undersampling":
            if y_size > n_size:
                class_yes_x_train = class_yes_x_train[:n_size]
                class_yes_y_train = class_yes_y_train[:n_size]
            else:
                class_no_x_train = class_no_x_train[:y_size]
                class_no_y_train = class_no_y_train[:y_size]
        elif sampling == "oversampling":
            if y_size > n_size:
                class_no_x_train = np.tile(class_no_x_train, (int(y_size / n_size), 1))
                class_no_y_train = np.zeros(class_no_x_train.shape[0])
            else:
                class_yes_x_train = np.tile(class_yes_x_train, (int(n_size / y_size), 1))
                class_yes_y_train = np.ones(class_yes_x_train.shape[0])

        print('MI attack on class ', j)
        MI_x_train = np.concatenate((class_yes_x_train, class_no_x_train), axis=0)
        MI_y_train = np.concatenate((class_yes_y_train, class_no_y_train), axis=0)
        MI_x_test = np.concatenate((class_yes_x_test, class_no_x_test), axis=0)
        MI_y_test = np.concatenate((class_yes_y_test, class_no_y_test), axis=0)
        MI_correctly_labeled_indexes = np.concatenate((class_yes_x_correctly_labeled, class_no_x_correctly_labeled), axis=0)
        MI_incorrectly_labeled_indexes = np.concatenate((class_yes_x_incorrectly_labeled, class_no_x_incorrectly_labeled), axis=0)


        #preparing data to train an attack model for correctly labeled samples
        if show_MI_attack_separate_result:
            correctly_classified_indexes_train_of_this_class = np.logical_and(correctly_classified_indexes_train, labels_train == j)
            correctly_classified_indexes_test_of_this_class = np.logical_and(correctly_classified_indexes_test, labels_test == j)
            cor_class_yes_x = intermediate_value_train[correctly_classified_indexes_train_of_this_class]
            cor_class_no_x = intermediate_value_test[correctly_classified_indexes_test_of_this_class]

            if cor_class_yes_x.shape[0] < cor_skip_threshold or cor_class_no_x.shape[0] < cor_skip_threshold:
                print("Class " + str(j) + " doesn't have enough sample of correctly labeled for training an attack model!", cor_class_yes_x.shape[0], cor_class_no_x.shape[0])
                skip_attack_on_correctly_labeled = True



            cor_class_yes_size = int(cor_class_yes_x.shape[0] * what_portion_of_samples_attacker_knows)
            cor_class_no_size = int(cor_class_no_x.shape[0] * what_portion_of_samples_attacker_knows)

            cor_class_yes_x_train = cor_class_yes_x[:cor_class_yes_size]
            cor_class_yes_y_train = np.ones(cor_class_yes_x_train.shape[0])
            cor_class_yes_x_test = cor_class_yes_x[cor_class_yes_size:]
            cor_class_yes_y_test = np.ones(cor_class_yes_x_test.shape[0])

            cor_class_no_x_train = cor_class_no_x[:cor_class_no_size]
            cor_class_no_y_train = np.zeros(cor_class_no_x_train.shape[0])
            cor_class_no_x_test = cor_class_no_x[cor_class_no_size:]
            cor_class_no_y_test = np.zeros(cor_class_no_x_test.shape[0])


            y_size = cor_class_yes_x_train.shape[0]
            n_size = cor_class_no_x_train.shape[0]
            if sampling == "undersampling":
                if y_size > n_size:
                    cor_class_yes_x_train = cor_class_yes_x_train[:n_size]
                    cor_class_yes_y_train = cor_class_yes_y_train[:n_size]
                else:
                    cor_class_no_x_train = cor_class_no_x_train[:y_size]
                    cor_class_no_y_train = cor_class_no_y_train[:y_size]
            elif sampling == "oversampling":
                if y_size > n_size:
                    cor_class_no_x_train = np.tile(cor_class_no_x_train, (int(y_size / n_size), 1))
                    cor_class_no_y_train = np.zeros(cor_class_no_x_train.shape[0])
                else:
                    cor_class_yes_x_train = np.tile(cor_class_yes_x_train, (int(n_size / y_size), 1))
                    cor_class_yes_y_train = np.ones(cor_class_yes_x_train.shape[0])

            cor_MI_x_train = np.concatenate((cor_class_yes_x_train, cor_class_no_x_train), axis=0)
            cor_MI_y_train = np.concatenate((cor_class_yes_y_train, cor_class_no_y_train), axis=0)
            cor_MI_x_test = np.concatenate((cor_class_yes_x_test, cor_class_no_x_test), axis=0)
            cor_MI_y_test = np.concatenate((cor_class_yes_y_test, cor_class_no_y_test), axis=0)

        #preparing data to train an attack model for incorrectly labeled samples
        if show_MI_attack_separate_result_for_incorrect:

            incorrectly_classified_indexes_train_of_this_class = np.logical_and(incorrectly_classified_indexes_train, labels_train == j)
            incorrectly_classified_indexes_test_of_this_class = np.logical_and(incorrectly_classified_indexes_test, labels_test == j)
            incor_class_yes_x = intermediate_value_train[incorrectly_classified_indexes_train_of_this_class]
            incor_class_no_x = intermediate_value_test[incorrectly_classified_indexes_test_of_this_class]

            if incor_class_yes_x.shape[0] < incor_skip_threshold or incor_class_no_x.shape[0] < incor_skip_threshold:
                print("Class " + str(j) + " for inccorectly labeled dataset doesn't have enough sample for training an attack model!", incor_class_yes_x.shape[0], incor_class_no_x.shape[0])
                skip_attack_on_incorrectly_labeled = True


            incor_class_yes_size = int(incor_class_yes_x.shape[0] * what_portion_of_samples_attacker_knows)
            incor_class_no_size = int(incor_class_no_x.shape[0] * what_portion_of_samples_attacker_knows)

            incor_class_yes_x_train = incor_class_yes_x[:incor_class_yes_size]
            incor_class_yes_y_train = np.ones(incor_class_yes_x_train.shape[0])
            incor_class_yes_x_test = incor_class_yes_x[incor_class_yes_size:]
            incor_class_yes_y_test = np.ones(incor_class_yes_x_test.shape[0])

            incor_class_no_x_train = incor_class_no_x[:incor_class_no_size]
            incor_class_no_y_train = np.zeros(incor_class_no_x_train.shape[0])
            incor_class_no_x_test = incor_class_no_x[incor_class_no_size:]
            incor_class_no_y_test = np.zeros(incor_class_no_x_test.shape[0])


            y_size = incor_class_yes_x_train.shape[0]
            n_size = incor_class_no_x_train.shape[0]
            if sampling == "undersampling":
                if y_size > n_size:
                    incor_class_yes_x_train = incor_class_yes_x_train[:n_size]
                    incor_class_yes_y_train = incor_class_yes_y_train[:n_size]
                else:
                    incor_class_no_x_train = incor_class_no_x_train[:y_size]
                    incor_class_no_y_train = incor_class_no_y_train[:y_size]
            elif sampling == "oversampling":
                if y_size > n_size:
                    incor_class_no_x_train = np.tile(incor_class_no_x_train, (int(y_size / n_size), 1))
                    incor_class_no_y_train = np.zeros(incor_class_no_x_train.shape[0])
                else:
                    incor_class_yes_x_train = np.tile(incor_class_yes_x_train, (int(n_size / y_size), 1))
                    incor_class_yes_y_train = np.ones(incor_class_yes_x_train.shape[0])

            incor_MI_x_train = np.concatenate((incor_class_yes_x_train, incor_class_no_x_train), axis=0)
            incor_MI_y_train = np.concatenate((incor_class_yes_y_train, incor_class_no_y_train), axis=0)
            incor_MI_x_test = np.concatenate((incor_class_yes_x_test, incor_class_no_x_test), axis=0)
            incor_MI_y_test = np.concatenate((incor_class_yes_y_test, incor_class_no_y_test), axis=0)

        if show_MI_attack:
            if attack_classifier == "NN":
                # Use NN classifier to launch Membership Inference attack (All data + correctly labeled)
                attack_model = Sequential()
                attack_model.add(Dense(128, input_dim=attack_input_dimension, activation='relu'))
                attack_model.add(Dense(64, activation='relu'))
                attack_model.add(Dense(1, activation='sigmoid'))
                attack_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
                attack_model.fit(MI_x_train, MI_y_train, validation_data=(MI_x_test, MI_y_test), epochs=30, batch_size=32, verbose=False, shuffle=True)

            elif attack_classifier == "RF":
                n_est = [500, 800, 1500, 2500, 5000]
                max_f = ['auto', 'sqrt']
                max_depth = [20, 30, 40, 50]
                max_depth.append(None)
                min_samples_s = [2, 5, 10, 15, 20]
                min_samples_l = [1, 2, 5, 10, 15]
                grid_param = {'n_estimators': n_est,
                              'max_features': max_f,
                              'max_depth': max_depth,
                              'min_samples_split': min_samples_s,
                              'min_samples_leaf': min_samples_l}
                RFR = RandomForestClassifier(random_state=1)
                if verbose:
                    RFR_random = RandomizedSearchCV(estimator=RFR, param_distributions=grid_param, n_iter=100, cv=2, verbose=1, random_state=42, n_jobs=-1)
                else:
                    RFR_random = RandomizedSearchCV(estimator=RFR, param_distributions=grid_param, n_iter=100, cv=2, verbose=0, random_state=42, n_jobs=-1)
                RFR_random.fit(MI_x_train, MI_y_train)
                if verbose:
                    print(RFR_random.best_params_)
                attack_model = RFR_random.best_estimator_

            elif attack_classifier == "XGBoost":
                temp_model = XGBClassifier()
                param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100], min_child_weight=[1, 5, 10, 15], subsample=[0.6, 0.8, 1.0], colsample_bytree=[0.6, 0.8, 1.0], max_depth=[3, 6, 9, 12])
                # param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100, 500, 1000])
                cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=2, random_state=1)
                # grid = GridSearchCV(estimator=temp_model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid = RandomizedSearchCV(estimator=temp_model, param_distributions=param_grid, n_iter=50, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid_result = grid.fit(MI_x_train, MI_y_train)
                attack_model = grid_result.best_estimator_
                if verbose:
                    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

            # MI attack accuracy on all data
            if attack_classifier == "NN":
                y_pred = attack_model.predict_classes(MI_x_test)
            else:
                y_pred = attack_model.predict(MI_x_test)
            MI_attack_per_class[j] = balanced_accuracy_score(MI_y_test, y_pred)
            MI_attack_prec_per_class[j] = precision_score(MI_y_test, y_pred, average=None)
            MI_attack_rcal_per_class[j] = recall_score(MI_y_test, y_pred, average=None)
            MI_attack_f1_per_class[j] = f1_score(MI_y_test, y_pred, average=None)

            # MI attack accuracy on correctly labeled
            if np.sum(MI_correctly_labeled_indexes) > 0:
                temp_x = MI_x_test[MI_correctly_labeled_indexes]
                temp_y = MI_y_test[MI_correctly_labeled_indexes]
                if attack_classifier == "NN":
                    y_pred = attack_model.predict_classes(temp_x)
                else:
                    y_pred = attack_model.predict(temp_x)
                MI_attack_per_class_correctly_labeled[j] = balanced_accuracy_score(temp_y, y_pred)
                MI_attack_prec_per_class_correctly_labeled[j] = precision_score(temp_y, y_pred, average=None)
                MI_attack_rcal_per_class_correctly_labeled[j] = recall_score(temp_y, y_pred, average=None)
                MI_attack_f1_per_class_correctly_labeled[j] = f1_score(temp_y, y_pred, average=None)


            # MI attack accuracy on incorrectly labeled
            if np.sum(MI_incorrectly_labeled_indexes) > 0:
                temp_x = MI_x_test[MI_incorrectly_labeled_indexes]
                temp_y = MI_y_test[MI_incorrectly_labeled_indexes]
                if attack_classifier == "NN":
                    y_pred = attack_model.predict_classes(temp_x)
                else:
                    y_pred = attack_model.predict(temp_x)
                MI_attack_per_class_incorrectly_labeled[j] = balanced_accuracy_score(temp_y, y_pred)
                MI_attack_prec_per_class_incorrectly_labeled[j] = precision_score(temp_y, y_pred, average=None)
                MI_attack_rcal_per_class_incorrectly_labeled[j] = recall_score(temp_y, y_pred, average=None)
                MI_attack_f1_per_class_incorrectly_labeled[j] = f1_score(temp_y, y_pred, average=None)

            if verbose:
                print('\nMI Attack (all data):')
                print('Accuracy:', MI_attack_per_class[j])
                print('Precision:', MI_attack_prec_per_class[j])
                print('Recall:', MI_attack_rcal_per_class[j])
                print('F1:', MI_attack_f1_per_class[j])
                print('\nMI Attack (correctly classified samples):')
                print('Accuracy:', MI_attack_per_class_correctly_labeled[j])
                print('Precision:', MI_attack_prec_per_class_correctly_labeled[j])
                print('Recall:', MI_attack_rcal_per_class_correctly_labeled[j])
                print('F1:', MI_attack_f1_per_class_correctly_labeled[j])
                print('\nMI Attack (misclassified samples):')
                print('Accuracy:', MI_attack_per_class_incorrectly_labeled[j])
                print('Precision:', MI_attack_prec_per_class_incorrectly_labeled[j])
                print('Recall:', MI_attack_rcal_per_class_incorrectly_labeled[j])
                print('F1:', MI_attack_f1_per_class_incorrectly_labeled[j])


        # Train a separate attack model (NN/RF/XGBoost) only on correctly classified samples
        if show_MI_attack_separate_result and not skip_attack_on_correctly_labeled:
            if attack_classifier == "NN":
                attack_model = Sequential()
                attack_model.add(Dense(64, input_dim=attack_input_dimension, activation='relu'))
                # attack_model.add(Dense(64, activation='relu'))
                attack_model.add(Dense(1, activation='sigmoid'))
                attack_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
                attack_model.fit(cor_MI_x_train, cor_MI_y_train, epochs=30, batch_size=32, verbose=False)

            elif attack_classifier == "RF":
                n_est = [500, 800, 1500, 2500, 5000]
                max_f = ['auto', 'sqrt']
                max_depth = [20, 30, 40, 50]
                max_depth.append(None)
                min_samples_s = [2, 5, 10, 15, 20]
                min_samples_l = [1, 2, 5, 10, 15]
                grid_param = {'n_estimators': n_est,
                              'max_features': max_f,
                              'max_depth': max_depth,
                              'min_samples_split': min_samples_s,
                              'min_samples_leaf': min_samples_l}
                RFR = RandomForestClassifier(random_state=1)
                if verbose:
                    RFR_random = RandomizedSearchCV(estimator=RFR, param_distributions=grid_param, n_iter=40, cv=2, verbose=1, random_state=42, n_jobs=-1)
                else:
                    RFR_random = RandomizedSearchCV(estimator=RFR, param_distributions=grid_param, n_iter=40, cv=2,
                                                    verbose=0, random_state=42, n_jobs=-1)
                RFR_random.fit(cor_MI_x_train, cor_MI_y_train)
                if verbose:
                    print(RFR_random.best_params_)
                attack_model = RFR_random.best_estimator_

            elif attack_classifier == "XGBoost":
                temp_model = XGBClassifier()
                param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100] , min_child_weight=[1, 5, 10, 15], subsample=[0.6, 0.8, 1.0], colsample_bytree=[0.6, 0.8, 1.0], max_depth=[3, 6, 9, 12])
                # param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100, 500, 1000])
                cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=2, random_state=1)
                # grid = GridSearchCV(estimator=temp_model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid = RandomizedSearchCV(estimator=temp_model, param_distributions=param_grid, n_iter=50, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid_result = grid.fit(cor_MI_x_train, cor_MI_y_train)
                attack_model = grid_result.best_estimator_
                if verbose:
                    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

            if attack_classifier == "NN":
                y_pred = attack_model.predict_classes(cor_MI_x_test)
                y_pred2 = attack_model.predict_classes(cor_MI_x_train)
            else:
                y_pred = attack_model.predict(cor_MI_x_test)
                y_pred2 = attack_model.predict(cor_MI_x_train)

            MI_attack_per_class_correctly_labeled_separate2[j] = balanced_accuracy_score(cor_MI_y_train, y_pred2)
            MI_attack_prec_per_class_correctly_labeled_separate2[j] = precision_score(cor_MI_y_train, y_pred2, average=None)
            MI_attack_rcal_per_class_correctly_labeled_separate2[j] = recall_score(cor_MI_y_train, y_pred2, average=None)
            MI_attack_f1_per_class_correctly_labeled_separate2[j] = f1_score(cor_MI_y_train, y_pred2, average=None)

            # print('\nMI Attack train set (specific to correctly labeled):', j, MI_attack_per_class_correctly_labeled_separate2[j], cor_MI_x_train.shape[0])
            # print('MI Attack:', MI_attack_prec_per_class_correctly_labeled_separate2[j])
            # print('MI Attack:', MI_attack_rcal_per_class_correctly_labeled_separate2[j])
            # print('MI Attack:', MI_attack_f1_per_class_correctly_labeled_separate2[j])


            MI_attack_per_class_correctly_labeled_separate[j] = balanced_accuracy_score(cor_MI_y_test, y_pred)
            MI_attack_prec_per_class_correctly_labeled_separate[j] = precision_score(cor_MI_y_test, y_pred, average=None)
            MI_attack_rcal_per_class_correctly_labeled_separate[j] = recall_score(cor_MI_y_test, y_pred, average=None)
            MI_attack_f1_per_class_correctly_labeled_separate[j] = f1_score(cor_MI_y_test, y_pred, average=None)

            if verbose:
                print('\nMI Attack model trained only on correctly classified samples:')
                print('Accuracy:', MI_attack_per_class_correctly_labeled_separate[j])
                print('Precision:', MI_attack_prec_per_class_correctly_labeled_separate[j])
                print('Recall:', MI_attack_rcal_per_class_correctly_labeled_separate[j])
                print('F1:', MI_attack_f1_per_class_correctly_labeled_separate[j])

        # Train a separate attack model (NN/RF/XGBoost) only on incorrectly classified samples
        if show_MI_attack_separate_result_for_incorrect and not skip_attack_on_incorrectly_labeled:
            if attack_classifier == "NN":
                attack_model = Sequential()
                attack_model.add(Dense(64, input_dim=attack_input_dimension, activation='relu'))
                # attack_model.add(Dense(64, activation='relu'))
                attack_model.add(Dense(1, activation='sigmoid'))
                attack_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
                attack_model.fit(incor_MI_x_train, incor_MI_y_train, epochs=30, batch_size=32, verbose=False)

            elif attack_classifier == "RF":
                n_est = [500, 800, 1500, 2500, 5000]
                max_f = ['auto', 'sqrt']
                max_depth = [20, 30, 40, 50]
                max_depth.append(None)
                min_samples_s = [2, 5, 10, 15, 20]
                min_samples_l = [1, 2, 5, 10, 15]
                grid_param = {'n_estimators': n_est,
                              'max_features': max_f,
                              'max_depth': max_depth,
                              'min_samples_split': min_samples_s,
                              'min_samples_leaf': min_samples_l}
                RFR = RandomForestClassifier(random_state=1)
                if verbose:
                    RFR_random = RandomizedSearchCV(estimator=RFR, param_distributions=grid_param, n_iter=100, cv=2, verbose=1, random_state=42, n_jobs=-1)
                else:
                    RFR_random = RandomizedSearchCV(estimator=RFR, param_distributions=grid_param, n_iter=100, cv=2, verbose=0, random_state=42, n_jobs=-1)
                RFR_random.fit(incor_MI_x_train, incor_MI_y_train)
                if verbose:
                    print(RFR_random.best_params_)
                attack_model = RFR_random.best_estimator_

            elif attack_classifier == "XGBoost":
                temp_model = XGBClassifier()
                param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100] , min_child_weight=[1, 5, 10, 15], subsample=[0.6, 0.8, 1.0], colsample_bytree=[0.6, 0.8, 1.0], max_depth=[3, 6, 9, 12])
                # param_grid = dict(scale_pos_weight=[1, 5, 10, 50, 100, 500, 1000])
                cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=2, random_state=1)
                # grid = GridSearchCV(estimator=temp_model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid = RandomizedSearchCV(estimator=temp_model, param_distributions=param_grid, n_iter=50, n_jobs=-1, cv=cv, scoring='balanced_accuracy')
                grid_result = grid.fit(incor_MI_x_train, incor_MI_y_train)
                attack_model = grid_result.best_estimator_
                if verbose:
                    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))


            if attack_classifier == "NN":
                y_pred = attack_model.predict_classes(incor_MI_x_test)
                y_pred2 = attack_model.predict_classes(incor_MI_x_train)
            else:
                y_pred = attack_model.predict(incor_MI_x_test)
                y_pred2 = attack_model.predict(incor_MI_x_train)

            MI_attack_per_class_incorrectly_labeled_separate2[j] = balanced_accuracy_score(incor_MI_y_train, y_pred2)
            MI_attack_prec_per_class_incorrectly_labeled_separate2[j] = precision_score(incor_MI_y_train, y_pred2, average=None)
            MI_attack_rcal_per_class_incorrectly_labeled_separate2[j] = recall_score(incor_MI_y_train, y_pred2, average=None)
            MI_attack_f1_per_class_incorrectly_labeled_separate2[j] = f1_score(incor_MI_y_train, y_pred2, average=None)

            # print('\nMI Attack train set (specific to incorrectly labeled):', j, MI_attack_per_class_incorrectly_labeled_separate2[j], incor_MI_x_train.shape[0])
            # print('MI Attack:', MI_attack_prec_per_class_incorrectly_labeled_separate2[j])
            # print('MI Attack:', MI_attack_rcal_per_class_incorrectly_labeled_separate2[j])
            # print('MI Attack:', MI_attack_f1_per_class_incorrectly_labeled_separate2[j])


            MI_attack_per_class_incorrectly_labeled_separate[j] = balanced_accuracy_score(incor_MI_y_test, y_pred)
            MI_attack_prec_per_class_incorrectly_labeled_separate[j] = precision_score(incor_MI_y_test, y_pred, average=None)
            MI_attack_rcal_per_class_incorrectly_labeled_separate[j] = recall_score(incor_MI_y_test, y_pred, average=None)
            MI_attack_f1_per_class_incorrectly_labeled_separate[j] = f1_score(incor_MI_y_test, y_pred, average=None)
            if verbose:
                print('\nMI Attack model trained only on incorrectly classified samples:')
                print('Accuracy:', MI_attack_per_class_incorrectly_labeled_separate[j])
                print('Precision:', MI_attack_prec_per_class_incorrectly_labeled_separate[j])
                print('Recall:', MI_attack_rcal_per_class_incorrectly_labeled_separate[j])
                print('F1:', MI_attack_f1_per_class_incorrectly_labeled_separate[j])


    if show_MI_attack:
        MI_attack, MI_attack_std = average_over_positive_values(MI_attack_per_class)
        MI_attack_correct_only, MI_attack_correct_only_std = average_over_positive_values(MI_attack_per_class_correctly_labeled)
        MI_attack_incorrect_only, MI_attack_incorrect_only_std = average_over_positive_values(MI_attack_per_class_incorrectly_labeled)

        MI_attack_prec, MI_attack_prec_std = average_over_positive_values_of_2d_array(MI_attack_prec_per_class)
        MI_attack_prec_correct_only, MI_attack_prec_correct_only_std = average_over_positive_values_of_2d_array(MI_attack_prec_per_class_correctly_labeled)
        MI_attack_prec_incorrect_only, MI_attack_prec_incorrect_only_std = average_over_positive_values_of_2d_array(MI_attack_prec_per_class_incorrectly_labeled)

        MI_attack_rcal, MI_attack_rcal_std = average_over_positive_values_of_2d_array(MI_attack_rcal_per_class)
        MI_attack_rcal_correct_only, MI_attack_rcal_correct_only_std = average_over_positive_values_of_2d_array(MI_attack_rcal_per_class_correctly_labeled)
        MI_attack_rcal_incorrect_only, MI_attack_rcal_incorrect_only_std = average_over_positive_values_of_2d_array(MI_attack_rcal_per_class_incorrectly_labeled)

        MI_attack_f1, MI_attack_f1_std = average_over_positive_values_of_2d_array(MI_attack_f1_per_class)
        MI_attack_f1_correct_only, MI_attack_f1_correct_only_std = average_over_positive_values_of_2d_array(MI_attack_f1_per_class_correctly_labeled)
        MI_attack_f1_incorrect_only, MI_attack_f1_incorrect_only_std = average_over_positive_values_of_2d_array(MI_attack_f1_per_class_incorrectly_labeled)

    if show_MI_attack_separate_result:
        MI_attack_correct_only_separate_model, MI_attack_correct_only_separate_model_std = average_over_positive_values(MI_attack_per_class_correctly_labeled_separate)
        MI_attack_prec_correct_only_separate_model, MI_attack_prec_correct_only_separate_model_std = average_over_positive_values_of_2d_array(MI_attack_prec_per_class_correctly_labeled_separate)
        MI_attack_rcal_correct_only_separate_model, MI_attack_rcal_correct_only_separate_model_std = average_over_positive_values_of_2d_array(MI_attack_rcal_per_class_correctly_labeled_separate)
        MI_attack_f1_correct_only_separate_model, MI_attack_f1_correct_only_separate_model_std = average_over_positive_values_of_2d_array(MI_attack_f1_per_class_correctly_labeled_separate)

        MI_attack_correct_only_separate_model2, MI_attack_correct_only_separate_model_std2 = average_over_positive_values(MI_attack_per_class_correctly_labeled_separate2)
        MI_attack_prec_correct_only_separate_model2, MI_attack_prec_correct_only_separate_model_std2 = average_over_positive_values_of_2d_array(MI_attack_prec_per_class_correctly_labeled_separate2)
        MI_attack_rcal_correct_only_separate_model2, MI_attack_rcal_correct_only_separate_model_std2 = average_over_positive_values_of_2d_array(MI_attack_rcal_per_class_correctly_labeled_separate2)
        MI_attack_f1_correct_only_separate_model2, MI_attack_f1_correct_only_separate_model_std2 = average_over_positive_values_of_2d_array(MI_attack_f1_per_class_correctly_labeled_separate2)

    if show_MI_attack_separate_result_for_incorrect:
        MI_attack_incorrect_only_separate_model, MI_attack_incorrect_only_separate_model_std = average_over_positive_values(MI_attack_per_class_incorrectly_labeled_separate)
        MI_attack_prec_incorrect_only_separate_model, MI_attack_prec_incorrect_only_separate_model_std = average_over_positive_values_of_2d_array(MI_attack_prec_per_class_incorrectly_labeled_separate)
        MI_attack_rcal_incorrect_only_separate_model, MI_attack_rcal_incorrect_only_separate_model_std = average_over_positive_values_of_2d_array(MI_attack_rcal_per_class_incorrectly_labeled_separate)
        MI_attack_f1_incorrect_only_separate_model, MI_attack_f1_incorrect_only_separate_model_std = average_over_positive_values_of_2d_array(MI_attack_f1_per_class_incorrectly_labeled_separate)

        MI_attack_incorrect_only_separate_model2, MI_attack_incorrect_only_separate_model_std2 = average_over_positive_values(MI_attack_per_class_incorrectly_labeled_separate2)
        MI_attack_prec_incorrect_only_separate_model2, MI_attack_prec_incorrect_only_separate_model_std2 = average_over_positive_values_of_2d_array(MI_attack_prec_per_class_incorrectly_labeled_separate2)
        MI_attack_rcal_incorrect_only_separate_model2, MI_attack_rcal_incorrect_only_separate_model_std2 = average_over_positive_values_of_2d_array(MI_attack_rcal_per_class_incorrectly_labeled_separate2)
        MI_attack_f1_incorrect_only_separate_model2, MI_attack_f1_incorrect_only_separate_model_std2 = average_over_positive_values_of_2d_array(MI_attack_f1_per_class_incorrectly_labeled_separate2)

    print("\n\n---------------------------------------")
    print("Final results:")
    print("Values are in a pair of average and standard deviation.")

    if show_MI_attack:
        print("\n\nMI Attack accuracy:")
        print('All data: ', str(np.round(MI_attack*100, 2)), str(np.round(MI_attack_std*100, 2)))
        print('Correctly classified samples: ', str(np.round(MI_attack_correct_only*100, 2)), str(np.round(MI_attack_correct_only_std*100, 2)))
        print('Misclassified samples: ', str(np.round(MI_attack_incorrect_only * 100, 2)), str(np.round(MI_attack_incorrect_only_std * 100, 2)))

        print("\nMI Attack precision:")
        print('All data: ', str(np.round(MI_attack_prec*100, 2)), str(np.round(MI_attack_prec_std*100, 2)))
        print('Correctly classified samples: ', str(np.round(MI_attack_prec_correct_only*100, 2)), str(np.round(MI_attack_prec_correct_only_std*100, 2)))
        print('Misclassified samples: ', str(np.round(MI_attack_prec_incorrect_only*100, 2)), str(np.round(MI_attack_prec_incorrect_only_std*100, 2)))

        print("\nMI Attack recall:")
        print('All data: ', str(np.round(MI_attack_rcal*100, 2)), str(np.round(MI_attack_rcal_std*100, 2)))
        print('Correctly classified samples: ', str(np.round(MI_attack_rcal_correct_only*100, 2)), str(np.round(MI_attack_rcal_correct_only_std*100, 2)))
        print('Misclassified samples: ', str(np.round(MI_attack_rcal_incorrect_only*100, 2)), str(np.round(MI_attack_rcal_incorrect_only_std*100, 2)))

        print("\nMI Attack f1:")
        print('All data: ', str(np.round(MI_attack_f1*100, 2)), str(np.round(MI_attack_f1_std*100, 2)))
        print('Correctly classified samples: ', str(np.round(MI_attack_f1_correct_only*100, 2)), str(np.round(MI_attack_f1_correct_only_std*100, 2)))
        print('Misclassified samples: ', str(np.round(MI_attack_f1_incorrect_only*100, 2)), str(np.round(MI_attack_f1_incorrect_only_std*100, 2)))

    if show_MI_attack_separate_result:
        # print("\nMI Attack accuracy, specific to correctly labeled samples (on its train set):")
        # print(str(np.round(MI_attack_correct_only_separate_model2*100, 2)), str(np.round(MI_attack_correct_only_separate_model_std2*100, 2)))
        # print(str(np.round(MI_attack_prec_correct_only_separate_model2*100, 2)), str(np.round(MI_attack_prec_correct_only_separate_model_std2*100, 2)))
        # print(str(np.round(MI_attack_rcal_correct_only_separate_model2*100, 2)), str(np.round(MI_attack_rcal_correct_only_separate_model_std2*100, 2)))
        # print(str(np.round(MI_attack_f1_correct_only_separate_model2*100, 2)), str(np.round(MI_attack_f1_correct_only_separate_model_std2*100, 2)))

        print("\nMI attack specific to correctly labeled samples:")
        print('Accuracy: ', str(np.round(MI_attack_correct_only_separate_model*100, 2)), str(np.round(MI_attack_correct_only_separate_model_std*100, 2)))
        print('Precision: ', str(np.round(MI_attack_prec_correct_only_separate_model*100, 2)), str(np.round(MI_attack_prec_correct_only_separate_model_std*100, 2)))
        print('Recall: ', str(np.round(MI_attack_rcal_correct_only_separate_model*100, 2)), str(np.round(MI_attack_rcal_correct_only_separate_model_std*100, 2)))
        print('F1: ', str(np.round(MI_attack_f1_correct_only_separate_model*100, 2)), str(np.round(MI_attack_f1_correct_only_separate_model_std*100, 2)))


    if show_MI_attack_separate_result_for_incorrect:
        # print("\nMI Attack accuracy, specific to ***incorrectly labeled samples (on its train set):")
        # print(str(np.round(MI_attack_incorrect_only_separate_model2*100, 2)), str(np.round(MI_attack_incorrect_only_separate_model_std2*100, 2)))
        # print(str(np.round(MI_attack_prec_incorrect_only_separate_model2*100, 2)), str(np.round(MI_attack_prec_incorrect_only_separate_model_std2*100, 2)))
        # print(str(np.round(MI_attack_rcal_incorrect_only_separate_model2*100, 2)), str(np.round(MI_attack_rcal_incorrect_only_separate_model_std2*100, 2)))
        # print(str(np.round(MI_attack_f1_incorrect_only_separate_model2*100, 2)), str(np.round(MI_attack_f1_incorrect_only_separate_model_std2*100, 2)))

        print("\nMI attack specific to incorrectly labeled samples:")
        print('Accuracy: ', str(np.round(MI_attack_incorrect_only_separate_model*100, 2)), str(np.round(MI_attack_incorrect_only_separate_model_std*100, 2)))
        print('Precision: ', str(np.round(MI_attack_prec_incorrect_only_separate_model*100, 2)), str(np.round(MI_attack_prec_incorrect_only_separate_model_std*100, 2)))
        print('Recall: ', str(np.round(MI_attack_rcal_incorrect_only_separate_model*100, 2)), str(np.round(MI_attack_rcal_incorrect_only_separate_model_std*100, 2)))
        print('F1: ', str(np.round(MI_attack_f1_incorrect_only_separate_model*100, 2)), str(np.round(MI_attack_f1_incorrect_only_separate_model_std*100, 2)))
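A hypothetical invocation, assuming a Keras model saved at models/cifar10.h5 (the path and argument values are illustrative, not taken from the repository):

intermediate_layer_attack(
    dataset="cifar_10",
    intermediate_layer=-1,
    attack_classifier="XGBoost",
    sampling="undersampling",
    what_portion_of_samples_attacker_knows=0.5,
    num_classes=10,
    num_targeted_classes=10,
    model_name="models/cifar10.h5",
    verbose=True,
    show_MI_attack=True,
    show_MI_attack_separate_result=True,
    show_MI_attack_separate_result_for_incorrect=False,
)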