示例#1
0
class HighPrecisionTestCase(unittest.TestCase):
    """Tests InfluenceModel inverse-HVP computation at float64 precision.

    The model output is v1^2 + v2^2, constant in x, so the loss Hessian
    w.r.t. (v1, v2) is diag(2, 2); with the gradient (4, 6) at the initial
    values (2, 3), the inverse-HVP is exactly (2, 3).
    """

    def setUp(self):
        class ConstantModel(tf.Module):
            """Returns v1^2 + v2^2 regardless of the input x."""

            def __init__(self, name=None):
                # BUG FIX: the original `super(ConstantModel, self)` created
                # the super proxy but never invoked __init__, skipping
                # tf.Module's own initialization (e.g. name scoping).
                super().__init__(name=name)
                self.v1 = tf.Variable(2.0, dtype=tf.float64)
                self.v2 = tf.Variable(3.0, dtype=tf.float64)

            def __call__(self, x):
                return self.v1 * self.v1 + self.v2 * self.v2

        def loss_fn(y_true, y_pred):
            # Identity loss: the loss IS the model output.
            return y_pred

        # Single dummy training/test point; dtype=np.float64 to match the
        # float64 variables above.
        self.influence_model = InfluenceModel(
            ConstantModel(),
            tf.constant([0.0], dtype=tf.float64),
            tf.constant([0.0], dtype=tf.float64),
            tf.constant([0.0]),
            tf.constant([0.0]),
            loss_fn,
            dtype=np.float64,
        )

    def test_get_inverse_hvp(self):
        # H = diag(2, 2), g = (4, 6)  =>  H^-1 g = (2, 3).
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[0], 2.0)
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[1], 3.0)

    def test_get_inverse_hvp_with_lissa(self):
        # LiSSA iteration with a small scaling factor should converge to the
        # same inverse-HVP as the default method.
        self.influence_model.method = "lissa"
        self.influence_model.scaling = 0.1
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[0], 2.0)
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[1], 3.0)
示例#2
0
    def setUp(self):
        """Build an InfluenceModel around a model that is linear in x."""

        class LinearModel(tf.Module):
            """Returns (v1^2 + v2^2) * x."""

            def __init__(self, name=None):
                # BUG FIX: the original `super(LinearModel, self)` created the
                # super proxy but never invoked __init__, skipping tf.Module's
                # own initialization (e.g. name scoping).
                super().__init__(name=name)
                self.v1 = tf.Variable(2.0)
                self.v2 = tf.Variable(3.0)

            def __call__(self, x):
                return (self.v1 * self.v1 + self.v2 * self.v2) * x

        def loss_fn(y_true, y_pred):
            # Identity loss: the loss IS the model output.
            return y_pred

        # One training point (x=5) and one test point (x=7); labels unused
        # by the identity loss.
        self.influence_model = InfluenceModel(
            LinearModel(),
            tf.constant([5.0]),
            tf.constant([0.0]),
            tf.constant([7.0]),
            tf.constant([0.0]),
            loss_fn,
        )
示例#3
0
    def setUp(self):
        """Build a float64 InfluenceModel around a constant-output model."""

        class ConstantModel(tf.Module):
            """Returns v1^2 + v2^2 regardless of the input x."""

            def __init__(self, name=None):
                # BUG FIX: the original `super(ConstantModel, self)` created
                # the super proxy but never invoked __init__, skipping
                # tf.Module's own initialization (e.g. name scoping).
                super().__init__(name=name)
                self.v1 = tf.Variable(2.0, dtype=tf.float64)
                self.v2 = tf.Variable(3.0, dtype=tf.float64)

            def __call__(self, x):
                return self.v1 * self.v1 + self.v2 * self.v2

        def loss_fn(y_true, y_pred):
            # Identity loss: the loss IS the model output.
            return y_pred

        # Single dummy training/test point; dtype=np.float64 to match the
        # float64 variables above.
        self.influence_model = InfluenceModel(
            ConstantModel(),
            tf.constant([0.0], dtype=tf.float64),
            tf.constant([0.0], dtype=tf.float64),
            tf.constant([0.0]),
            tf.constant([0.0]),
            loss_fn,
            dtype=np.float64,
        )
])

# Compile before loading weights; loss/metrics must match the checkpointed
# training configuration (logits output, so from_logits=True).
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

# Restore the pretrained binary-MNIST classifier weights.
model.load_weights("./output/binary_mnist_checkpoint")

# Influence-function wrapper over the full model: float64 arithmetic with
# damping=0.2 and a tight CG tolerance for a stable inverse-HVP solve.
influence_model = InfluenceModel(
    model,
    binary_train_images,
    categorical_train_labels,
    binary_test_images,
    categorical_test_labels,
    model.loss,
    damping=0.2,
    dtype=np.float64,
    cg_tol=1e-05,
)

# Split the network for the representer-point method: layer 0 acts as the
# feature extractor, layer 1 as the prediction head.
# NOTE(review): assumes the model is a two-layer Sequential — confirm.
feature_model = model.get_layer(index=0)
prediction_network = model.get_layer(index=1)

representer_model = RepresenterModel(feature_model, prediction_network,
                                     binary_train_images,
                                     categorical_train_labels,
                                     binary_test_images, model.loss)

# First 1000 training indices arranged as 10 batches of 100.
training_idxs = np.arange(0, 1000).reshape((10, 100))
示例#5
0
class ConstantModelTestCase(unittest.TestCase):
    """Tests InfluenceModel primitives against a hand-computable model.

    The model output is v1^2 + v2^2 with v1=2, v2=3, so gradients, HVPs and
    influence values all have closed-form expected values. Default float32
    arithmetic is used, hence the relaxed `places=5` tolerances below.
    """

    def setUp(self):
        class ConstantModel(tf.Module):
            """Returns v1^2 + v2^2 regardless of the input x."""

            def __init__(self, name=None):
                # BUG FIX: the original `super(ConstantModel, self)` created
                # the super proxy but never invoked __init__, skipping
                # tf.Module's own initialization (e.g. name scoping).
                super().__init__(name=name)
                self.v1 = tf.Variable(2.0)
                self.v2 = tf.Variable(3.0)

            def __call__(self, x):
                return self.v1 * self.v1 + self.v2 * self.v2

        def loss_fn(y_true, y_pred):
            # Identity loss: the loss IS the model output.
            return y_pred

        # Single dummy training/test point; labels are unused by the
        # identity loss.
        self.influence_model = InfluenceModel(
            ConstantModel(),
            tf.constant([0.0]),
            tf.constant([0.0]),
            tf.constant([0.0]),
            tf.constant([0.0]),
            loss_fn,
        )

    def test_get_hvp(self):
        # Hessian of v1^2 + v2^2 is diag(2, 2), so H @ (1, 1) = (2, 2).
        self.assertAlmostEqual(
            self.influence_model.get_hvp([1.0, 1.0])[0], 2.0)
        self.assertAlmostEqual(
            self.influence_model.get_hvp([1.0, 1.0])[1], 2.0)

    def test_get_training_gradient(self):
        # d/dv1 = 2*v1 = 4, d/dv2 = 2*v2 = 6.
        self.assertAlmostEqual(
            self.influence_model.get_training_gradient(0)[0], 4.0)
        self.assertAlmostEqual(
            self.influence_model.get_training_gradient(0)[1], 6.0)

    def test_get_inverse_hvp(self):
        # H^-1 g = (4, 6) / 2 = (2, 3).
        # For float32, there is an error in order 1e-08.
        # This does not occur for float64, so assume it is just some
        # precision limitation.
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[0],
                               2.0,
                               places=5)
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[1],
                               3.0,
                               places=5)

    def test_get_inverse_hvp_with_lissa(self):
        # Similar precision error as with CG.
        self.influence_model.method = "lissa"
        self.influence_model.scaling = 0.1
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[0],
                               2.0,
                               places=5)
        self.assertAlmostEqual(self.influence_model.get_inverse_hvp(0)[1],
                               3.0,
                               places=5)

    def test_get_test_gradient(self):
        # Same gradient as the training point: (4, 6).
        self.assertAlmostEqual(
            self.influence_model.get_test_gradient(0)[0],
            4.0,
        )
        self.assertAlmostEqual(
            self.influence_model.get_test_gradient(0)[1],
            6.0,
        )

    def test_get_influence_on_loss(self):
        # -(inverse-HVP) . (test gradient) = -(2*4 + 3*6) = -26.
        # Carry-over precision loss from get_inverse_hvp().
        self.assertAlmostEqual(
            self.influence_model.get_influence_on_loss(0, 0),
            -26.0,
            places=5,
        )

    def test_get_theta_relatif(self):
        # Influence normalized by ||inverse-HVP|| = sqrt(13).
        # Carry-over precision loss from get_inverse_hvp().
        self.assertAlmostEqual(
            self.influence_model.get_theta_relatif(0, 0),
            -26.0 / math.sqrt(13.0),
            places=5,
        )

    def test_get_l_relatif(self):
        # Influence normalized by sqrt(|influence|) = sqrt(26).
        # Carry-over precision loss from get_inverse_hvp().
        self.assertAlmostEqual(
            self.influence_model.get_l_relatif(0, 0),
            -26.0 / math.sqrt(26.0),
            places=5,
        )

    def test_get_new_parameters(self):
        # Removing the single training point drives (v1, v2) to (0, 0).
        # Carry-over precision loss from get_inverse_hvp().
        self.assertAlmostEqual(
            self.influence_model.get_new_parameters(0)[0].numpy(),
            0.0,
            places=5)
        self.assertAlmostEqual(
            self.influence_model.get_new_parameters(0)[1].numpy(),
            0.0,
            places=5)
示例#6
0
# Restore the pretrained COMPAS classifier weights.
model.load_weights("./output/compas_checkpoint")

num_training_points = 4278
num_test_points = 1000

# Result matrices: entry [i, j] is the score of training point i on test
# point j, for each of the three influence metrics.
influence_values = np.zeros((num_training_points, num_test_points))
theta_relatif_values = np.zeros((num_training_points, num_test_points))
l_relatif_values = np.zeros((num_training_points, num_test_points))

# Light damping (0.01) and float64 arithmetic for a stable CG solve.
influence_model = InfluenceModel(
    model,
    feature_data[train_idxs],
    target_data[train_idxs],
    feature_data[test_idxs],
    target_data[test_idxs],
    model.loss,
    damping=0.01,
    dtype=np.float64,
    cg_tol=1e-05,
)

# Full sweep: every (training point, test point) pair. This is expensive —
# num_training_points * num_test_points influence computations.
for i in range(num_training_points):

    print("Computing influence of training point", i, "out of",
          num_training_points)
    for j in range(num_test_points):
        influence_values[i, j] = influence_model.get_influence_on_loss(i, j)
        theta_relatif_values[i, j] = influence_model.get_theta_relatif(i, j)
        l_relatif_values[i, j] = influence_model.get_l_relatif(i, j)
示例#7
0
# Number of validation points = 6000
# Number of test points = 10000

# Sweep only a subset of the data: 540 training x 100 test points.
num_training_points = 540
num_test_points = 100

# Result matrices: entry [i, j] is the score of training point i on test
# point j, for each of the three influence metrics.
influence_values = np.zeros((num_training_points, num_test_points))
theta_relatif_values = np.zeros((num_training_points, num_test_points))
l_relatif_values = np.zeros((num_training_points, num_test_points))

# Heavier damping (0.2) and a looser CG tolerance (1e-03) than the COMPAS
# setup — presumably a speed/stability trade-off for the larger model.
influence_model = InfluenceModel(
    model,
    train_images,
    categorical_train_labels,
    test_images,
    categorical_test_labels,
    model.loss,
    damping=0.2,
    dtype=np.float64,
    cg_tol=1e-03,
)

# Full sweep over the selected subset of (training, test) pairs.
for i in range(num_training_points):

    print("Computing influence of training point", i, "out of", num_training_points)

    for j in range(num_test_points):
        influence_values[i, j] = influence_model.get_influence_on_loss(i, j)
        theta_relatif_values[i, j] = influence_model.get_theta_relatif(i, j)
        l_relatif_values[i, j] = influence_model.get_l_relatif(i, j)