        # 1: Randomly change labels to another class ---------------------------------------------------------------
        # (Disabled) binary variant, used to sanity-check the 2-class fashion MNIST setup:
        # idx_to_flip = np.random.choice(num_train_examples, size=num_flips,
        #                                replace=False)  # random indices into the training set
        # Y_train_flipped[idx_to_flip] = 1 - Y_train[idx_to_flip]
        #
        # (Disabled) multi-class variant: reassign each flipped label to a different, randomly chosen class.
        # https://stackoverflow.com/questions/42999093/generate-random-number-in-range-excluding-some-numbers/42999212
        # for i in range(num_flips):
        #     Y_train_flipped[i] = random.choice([j for j in range(num_classes) if j != Y_train[i]])

        # Swap the flipped labels into the model, then retrain from scratch.
        tf_model.update_train_x_y(X_train, Y_train_flipped)
        # 2: Retrain; for now, the parameters inside model.train() are set up for ALL_CNN_C ------------------------
        tf_model.train()
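        # Evaluate the retrained model on the full test set; columns 1 and 2 of
        # flipped_results hold the unregularized test loss and the test accuracy.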
        flipped_results[flips_idx, random_seed_idx, 1:] = tf_model.sess.run(
            [tf_model.loss_no_reg, tf_model.accuracy_op],
            feed_dict=tf_model.all_test_feed_dict)
        print('Flipped loss: %.5f. Accuracy: %.3f' %
              (flipped_results[flips_idx, random_seed_idx, 1],
               flipped_results[flips_idx, random_seed_idx, 2]))

        # Per-example (unregularized) training losses and the estimated
        # leave-one-out (LOO) influence of each training point.
        train_losses = tf_model.sess.run(
            tf_model.indiv_loss_no_reg, feed_dict=tf_model.all_train_feed_dict)
        train_loo_influences = tf_model.get_loo_influences()
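        # Sketch (not in the original): a common next step is to rank training points
        # by their estimated LOO influence and inspect the top candidates as likely
        # mislabeled examples, e.g.:
        # num_to_check = 100  # hypothetical inspection budget
        # suspected_idx = np.argsort(-train_loo_influences)[:num_to_check]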

        # 3 -------------------------------------------------------------------------------------------------------------------#
# Example #2: remove harmful training points from a binary 1-vs-7 classifier and re-check a test prediction -----------
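# Assumed imports for this snippet (not shown in the excerpt):
# import numpy as np
# import matplotlib.pyplot as plt
# from scipy.special import expit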
# Keep only the training points with good (non-harmful) influence and remap labels from {-1, +1} to {0, 1}.
new_better_X_train = X_train[good_influence_mask]
new_better_Y_train = np.array((Y_train[good_influence_mask] + 1) / 2, dtype=int)
print("Number of bad points removed:", len(X_train) - len(new_better_X_train))

# Influences of the training sevens (label -1); `peaks == 2` presumably marks
# sevens drawn with a crossbar ("hyphenated" sevens), which are then dropped.
influences_sevens = influences[Y_train == -1]
influences_hyphen_sevens = influences_sevens[np.array(peaks) == 2]
new_train_7s = train_7s[np.array(peaks) != 2]

# Rebuild the 1-vs-7 training set without the crossed sevens (ones -> label 1, sevens -> label 0).
new_X_train = np.concatenate((train_1s, new_train_7s), axis=0)
new_Y_train = np.concatenate((np.ones(len(train_1s)), np.zeros(len(new_train_7s))), axis=0)
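# Hypothetical sanity check (not in the original, assumes X_train contains only the 1s and 7s):
# the rebuilt set should shrink by exactly the number of crossed sevens removed.
# assert len(new_X_train) == len(X_train) - np.sum(np.array(peaks) == 2)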

# Total estimated influence of the crossed-seven population (sum of per-point influences).
total_population_influence = np.sum(influences_hyphen_sevens)

print "Influence of the population", total_population_influence
print "Test class", Y_test[test_idx]
print "Probability score before removing the population", 1 - expit(np.array(tf_model.return_params()).dot(X_test[test_idx]))
tf_model.update_train_x_y(new_X_train, new_Y_train)
tf_model.train()
print("Probability score after removing the population:",
      1 - expit(np.array(tf_model.return_params()).dot(X_test[test_idx])))
plt.imshow(X_test[test_idx].reshape(28, -1))
plt.show()
# Now retrain on the version with the individually bad (negative-influence) points removed instead.
tf_model.update_train_x_y(new_better_X_train, new_better_Y_train)
tf_model.train()
print("Probability score after removing the bad points:",
      1 - expit(np.array(tf_model.return_params()).dot(X_test[test_idx])))
plt.figure(1)
plt.imshow(removed_example_1)
plt.figure(2)
plt.imshow(removed_example_2)
plt.show()