  def test_given_labeled_loss(self, model_fn):
    inputs, labels, model, loss_fn, expected_adv_loss = (
        self._build_linear_regression_model_and_inputs(model_fn))

    with tf.GradientTape() as tape:
      tape.watch(inputs['feature'])
      outputs = model(inputs)
      labeled_loss = loss_fn(labels, outputs)
    # Wrap model and loss_fn to record the number of times they get called.
    call_count = collections.Counter()

    def wrapped_model(inputs):
      call_count['model'] += 1
      return model(inputs)

    def wrapped_loss_fn(*args, **kwargs):
      call_count['loss_fn'] += 1
      return loss_fn(*args, **kwargs)

    adv_loss = adversarial_regularization.adversarial_loss(
        inputs,
        labels,
        wrapped_model,
        wrapped_loss_fn,
        adv_config=self.adv_config,
        labeled_loss=labeled_loss,
        gradient_tape=tape)
    self.assertAllClose(expected_adv_loss, self.evaluate(adv_loss))
    # The model and loss_fn should each be called exactly once, i.e. the
    # predictions and loss on the original inputs are not re-computed.
    self.assertEqual(1, call_count['model'])
    self.assertEqual(1, call_count['loss_fn'])
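
  # Why the tape reuse above suffices (a sketch of the mechanism, not the
  # library's exact internals): the adversarial perturbation only needs the
  # gradient of the labeled loss w.r.t. the watched features, which the
  # caller's tape already holds, e.g. for an L2-normalized step:
  #
  #   grad = tape.gradient(labeled_loss, inputs['feature'])
  #   perturbed = inputs['feature'] + adv_step_size * tf.nn.l2_normalize(grad)
  #
  # so neither model nor loss_fn has to run again on the original inputs.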

  def test_given_predictions(self, model_fn):
    inputs, labels, model, loss_fn, expected_adv_loss = (
        self._build_linear_regression_model_and_inputs(model_fn))

    with tf.GradientTape() as tape:
      tape.watch(inputs['feature'])
      outputs = model(inputs)

    # Wrap model to record the number of times it gets called. The count
    # cannot be kept in a plain local integer: an assignment like `count += 1`
    # inside the nested function would bind `count` in that innermost scope
    # and raise UnboundLocalError; see the standalone sketch after this test.
    # https://docs.python.org/3/tutorial/classes.html#python-scopes-and-namespaces
    call_count = collections.Counter()

    def wrapped_model(inputs):
      call_count['model'] += 1
      return model(inputs)

    adv_loss = adversarial_regularization.adversarial_loss(
        inputs,
        labels,
        wrapped_model,
        loss_fn,
        adv_config=self.adv_config,
        predictions=outputs,
        gradient_tape=tape)
    self.assertAllClose(expected_adv_loss, self.evaluate(adv_loss))
    # The model should be called exactly once, i.e. the predictions on the
    # original inputs are not re-computed.
    self.assertEqual(1, call_count['model'])
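
  # A minimal standalone sketch (hypothetical helper, not part of the test
  # class) of the scoping pitfall noted above: rebinding a name inside a
  # nested function makes it local to that function, whereas a Counter is
  # only mutated, never rebound.
  def _count_calls_sketch(fn):
    calls = collections.Counter()

    def wrapped(*args, **kwargs):
      calls['fn'] += 1  # Mutation, not rebinding: no `nonlocal` needed.
      return fn(*args, **kwargs)

    # With `calls = 0` and `calls += 1` instead, Python would raise
    # UnboundLocalError here unless `wrapped` declared `nonlocal calls`.
    return wrapped, calls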
  def test_with_model_kwargs(self):
    w = np.array([[4.0], [-3.0]])
    x0 = np.array([[2.0, 3.0]])
    y0 = np.array([[0.0]])
    model = build_linear_keras_sequential_model(input_shape=(2,), weights=w)
    model.add(tf.keras.layers.BatchNormalization())

    adv_loss = adversarial_regularization.adversarial_loss(
        features={'feature': tf.constant(x0)},
        labels=tf.constant(y0),
        model=model,
        loss_fn=tf.keras.losses.MeanSquaredError(),
        adv_config=self.adv_config,
        model_kwargs={'training': True})
    # BatchNormalization outputs 0 for a single-example batch when
    # training=True, so the loss against the all-zero label is 0.
    self.assertAllClose(0.0, self.evaluate(adv_loss))
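
  # A worked check of that claim (standard batch-norm formula, default
  # gamma=1, beta=0 initializers; eps is the layer's epsilon):
  #
  #   bn(x) = gamma * (x - mean) / sqrt(var + eps) + beta
  #         = 1 * (x - x) / sqrt(0 + eps) + 0 = 0
  #
  # since a single-example batch has mean == x and var == 0. With label
  # y0 = 0, MeanSquaredError(0, 0) = 0 for the original and the perturbed
  # input alike.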

  def test_normal_case(self, model_fn):
    inputs, labels, model, loss_fn, expected_adv_loss = (
        self._build_linear_regression_model_and_inputs(model_fn))
    adv_loss = adversarial_regularization.adversarial_loss(
        inputs, labels, model, loss_fn, adv_config=self.adv_config)
    self.assertAllClose(expected_adv_loss, self.evaluate(adv_loss))
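
# A hedged, standalone usage sketch of the function these tests exercise,
# assuming the `neural_structured_learning` package, whose public alias for
# it is `nsl.keras.adversarial_loss`. The model, weights, and config values
# below are illustrative, not taken from the tests above.
import neural_structured_learning as nsl
import tensorflow as tf


def adversarial_loss_demo():
  # Linear model y = x @ w with a named input so a features dict works.
  x_in = tf.keras.Input(shape=(2,), name='feature')
  y_out = tf.keras.layers.Dense(1, use_bias=False)(x_in)
  model = tf.keras.Model(x_in, y_out)

  features = {'feature': tf.constant([[2.0, 3.0]])}
  labels = tf.constant([[1.0]])

  adv_config = nsl.configs.make_adv_reg_config(
      multiplier=0.2, adv_step_size=0.01, adv_grad_norm='l2')
  # Returns a scalar loss evaluated on inputs perturbed along the gradient
  # of the labeled loss, per the configured step size and norm.
  return nsl.keras.adversarial_loss(
      features, labels, model, tf.keras.losses.MeanSquaredError(),
      adv_config=adv_config)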