# Example #1
    def compile(self,
                optimizer,
                loss,
                kernel_initializer=tf.initializers.GlorotUniform,
                **kwargs):  # pylint: disable=arguments-differ
        """Configures the model for training; see the super class.

        The default optimizer used in the BoltOn method is SGD.

        Args:
          optimizer: Optimizer to use. Unless it already is a BoltOn
            optimizer, it is resolved via keras and wrapped with BoltOn.
          loss: Loss function to use. Must be strongly convex, i.e. extend
            StrongConvexMixin.
          kernel_initializer: Kernel initializer for the single output layer.
          **kwargs: Forwarded to keras Model.compile. See super.

        Raises:
          ValueError: If `loss` does not extend StrongConvexMixin.
        """
        # Strong convexity of the loss is a hard requirement of the BoltOn
        # method, so reject anything else up front.
        if not isinstance(loss, StrongConvexMixin):
            raise ValueError(
                'loss function must be a Strongly Convex and therefore '
                'extend the StrongConvexMixin.')
        # compile may be invoked several times (e.g. when inputs/outputs are
        # only known at fit time); build the output layer exactly once.
        if not self._layers_instantiated:
            self.output_layer = tf.keras.layers.Dense(
                self.n_outputs,
                kernel_regularizer=loss.kernel_regularizer(),
                kernel_initializer=kernel_initializer(),
            )
            self._layers_instantiated = True
        # Resolve string/plain optimizers and wrap them with BoltOn.
        if not isinstance(optimizer, BoltOn):
            optimizer = BoltOn(optimizers.get(optimizer), loss)

        super(BoltOnModel, self).compile(optimizer, loss=loss, **kwargs)
# Example #2
    def test_fit(self, generator, reset_n_samples):
        """Tests that a BoltOnModel can be fitted.

        Args:
          generator: True exercises the generator path, False the iterator
            path.
          reset_n_samples: True resets n_samples to None before fitting,
            False leaves it untouched.
        """
        convex_loss = TestLoss(1, 1, 1)
        bolton_opt = BoltOn(TestOptimizer(), convex_loss)
        # Small, fixed problem dimensions keep the test fast.
        clf = _do_fit(
            10,  # n_samples
            5,   # input_dim
            2,   # n_classes
            1,   # epsilon
            generator,
            1,   # batch_size
            reset_n_samples,
            bolton_opt,
            convex_loss,
        )
        # A successfully fitted keras model exposes its layers.
        self.assertEqual(hasattr(clf, 'layers'), True)
# -------


class TestModel(tf.keras.Model):  # pylint: disable=abstract-method
    """Minimal single-Dense-layer keras model for the BoltOn walkthrough."""

    def __init__(self, reg_layer, number_of_outputs=1):
        """Builds the single output layer.

        Args:
          reg_layer: Kernel regularizer applied to the Dense output layer.
          number_of_outputs: Width of the Dense output layer.
        """
        super(TestModel, self).__init__(name='test')
        self.output_layer = tf.keras.layers.Dense(
            number_of_outputs, kernel_regularizer=reg_layer)

    def call(self, inputs):  # pylint: disable=arguments-differ
        """Forward pass: applies the single Dense layer to `inputs`."""
        return self.output_layer(inputs)


# Build a strongly convex loss and wrap a plain SGD optimizer with BoltOn.
optimizer = tf.optimizers.SGD()
# NOTE(review): reg_lambda, C and radius_constant are assumed to be defined
# earlier in the tutorial — confirm against the full file.
loss = losses.StrongConvexBinaryCrossentropy(reg_lambda, C, radius_constant)
optimizer = BoltOn(optimizer, loss)
# -------
# Now, we instantiate our model and check for 1. Since our loss requires L2
# regularization over the kernel, we will pass it to the model.
# -------
n_outputs = 1  # parameter for model and optimizer context.
# The loss requires L2 regularization over the kernel, so its regularizer is
# passed into the model's single layer; then compile with the BoltOn-wrapped
# optimizer and the strongly convex loss.
test_model = TestModel(loss.kernel_regularizer(), n_outputs)
test_model.compile(optimizer, loss)
# -------
# We comply with 2., and use the BoltOn Optimizer as a context around the fit
# method.
# -------
# parameters for context
noise_distribution = 'laplace'  # presumably the distribution of the privacy noise added by the BoltOn context — confirm
epsilon = 2  # presumably the differential-privacy budget — confirm against BoltOn docs
class_weights = 1  # Previously, the fit method auto-detected the class_weights.