Example #1
  def test_bad_init_params(self, n_outputs):
    """Test bad initializations of BoltOnModel that should raise errors.

    Args:
      n_outputs: number of output neurons
    """
    # test invalid domains for each variable, especially noise
    with self.assertRaises(ValueError):
      models.BoltOnModel(n_outputs)
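The invalid value itself comes from the test's parametrization, which is not shown in this excerpt. A minimal hedged sketch of the kind of input this test guards against (the specific value is an assumption, not taken from the original suite):

  # Hedged sketch: BoltOnModel is assumed to reject a non-positive
  # number of output neurons with a ValueError.
  try:
    models.BoltOnModel(-1)  # assumed-invalid n_outputs
  except ValueError as err:
    print('rejected as expected:', err)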
Example #2
  def test_init_params(self, n_outputs):
    """Test initialization of BoltOnModel.

    Args:
      n_outputs: number of output neurons
    """
    # test valid domains for each variable
    clf = models.BoltOnModel(n_outputs)
    self.assertIsInstance(clf, models.BoltOnModel)
Example #3
def _do_fit(n_samples,
            input_dim,
            n_outputs,
            epsilon,
            generator,
            batch_size,
            reset_n_samples,
            optimizer,
            loss,
            distribution='laplace'):
  """Instantiate necessary components for fitting and perform a model fit.

  Args:
    n_samples: number of samples in dataset
    input_dim: the sample dimensionality
    n_outputs: number of output neurons
    epsilon: privacy parameter
    generator: True to create a generator, False to use an iterator
    batch_size: batch_size to use
    reset_n_samples: True to set n_samples to None prior to fitting; False does
      nothing
    optimizer: instance of TestOptimizer
    loss: instance of TestLoss
    distribution: distribution to get noise from.

  Returns:
    BoltOnModel instance
  """
  clf = models.BoltOnModel(n_outputs)
  clf.compile(optimizer, loss)
  if generator:
    x = _cat_dataset(
        n_samples, input_dim, n_outputs, batch_size, generator=generator)
    y = None
    # x = x.batch(batch_size)
    x = x.shuffle(n_samples // 2)
    batch_size = None
    if reset_n_samples:
      n_samples = None
    clf.fit_generator(
        x,
        n_samples=n_samples,
        noise_distribution=distribution,
        epsilon=epsilon)
  else:
    x, y = _cat_dataset(
        n_samples, input_dim, n_outputs, batch_size, generator=generator)
    if reset_n_samples:
      n_samples = None
    clf.fit(
        x,
        y,
        batch_size=batch_size,
        n_samples=n_samples,
        noise_distribution=distribution,
        epsilon=epsilon)
  return clf
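_do_fit (and several tests below) depend on a _cat_dataset helper that is not included in this excerpt. A minimal sketch of a compatible helper, assuming it builds one constant-valued feature block per class and, for the generator case, wraps the data in a tf.data.Dataset; the exact shapes and values are assumptions, not the original helper:

import tensorflow as tf

def _cat_dataset(n_samples, input_dim, n_classes, batch_size, generator=False):
  """Sketch: toy categorical dataset with one constant block per class."""
  del batch_size  # accepted for signature parity; callers batch separately
  x_stack, y_stack = [], []
  for i_class in range(n_classes):
    x_stack.append(tf.constant(i_class, tf.float32, (n_samples, input_dim)))
    y_stack.append(tf.constant(i_class, tf.float32, (n_samples, n_classes)))
  x_set, y_set = tf.concat(x_stack, 0), tf.concat(y_stack, 0)
  if generator:  # the generator path returns a single Dataset object
    return tf.data.Dataset.from_tensor_slices((x_set, y_set))
  return x_set, y_set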
Example #4
  def test_bad_compile(self, n_outputs, loss, optimizer):
    """Test bad compilations of BoltOnModel that should raise errors.

    Args:
      n_outputs: number of output neurons
      loss: instantiated TestLoss instance
      optimizer: instantiated TestOptimizer instance
    """
    # test compilation of an invalid tf.optimizer and a non-instantiated loss
    with self.cached_session():
      with self.assertRaises((ValueError, AttributeError)):
        clf = models.BoltOnModel(n_outputs)
        clf.compile(optimizer, loss)
Example #5
  def test_compile(self, n_outputs, loss, optimizer):
    """Test compilation of BoltOnModel.

    Args:
      n_outputs: number of output neurons
      loss: instantiated TestLoss instance
      optimizer: instantiated TestOptimizer instance
    """
    # test compilation of valid tf.optimizer and tf.loss
    with self.cached_session():
      clf = models.BoltOnModel(n_outputs)
      clf.compile(optimizer, loss)
      self.assertEqual(clf.loss, loss)
Example #6
  def test_class_errors(self, class_weights, class_counts, num_classes,
                        err_msg):
    """Tests the BOltonModel calculate_class_weights method.

    This test passes invalid params which should raise the expected errors.

    Args:
      class_weights: the class_weights to use.
      class_counts: count of number of samples for each class.
      num_classes: number of output neurons.
      err_msg: The expected error message.
    """
    clf = models.BoltOnModel(1, 1)
    with self.assertRaisesRegex(ValueError, err_msg):
      clf.calculate_class_weights(class_weights, class_counts, num_classes)
Example #7
  def test_class_calculate(self, class_weights, class_counts, num_classes,
                           result):
    """Tests the BoltOnModel calculate_class_weights method.

    Args:
      class_weights: the class_weights to use
      class_counts: count of number of samples for each class
      num_classes: number of output neurons
      result: expected result
    """
    clf = models.BoltOnModel(1, 1)
    computed = clf.calculate_class_weights(class_weights, class_counts,
                                           num_classes)

    if hasattr(computed, 'numpy'):
      computed = computed.numpy()
    self.assertAllEqual(computed, result)
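For a concrete sense of the parametrization, a call along the following lines would be expected to return one weight per class. The 'balanced' mode and the imbalance chosen here are assumptions for illustration, not values from the original suite:

clf = models.BoltOnModel(1, 1)
# Hypothetical inputs: two classes with a 1:3 sample imbalance.
weights = clf.calculate_class_weights(class_weights='balanced',
                                      class_counts=[10, 30],
                                      num_classes=2)
print(weights)  # a larger weight is expected for the rarer class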
Example #8
  def test_fit_gen(self, generator):
    """Tests the fit_generator method of BoltOnModel.

    Args:
      generator: True to test with a generator dataset
    """
    loss = TestLoss(1, 1, 1)
    optimizer = TestOptimizer()
    n_classes = 2
    input_dim = 5
    batch_size = 1
    n_samples = 10
    clf = models.BoltOnModel(n_classes)
    clf.compile(optimizer, loss)
    x = _cat_dataset(
        n_samples, input_dim, n_classes, batch_size, generator=generator)
    x = x.batch(batch_size)
    x = x.shuffle(n_samples // 2)
    clf.fit_generator(x, n_samples=n_samples)
    self.assertTrue(hasattr(clf, 'layers'))
Example #9
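This walkthrough excerpt relies on names defined earlier in the tutorial. A hedged preamble that would make it self-contained; the import path is an assumption (it has varied across versions of the library) and the sizes are illustrative:

import tensorflow as tf
from privacy.bolt_on import losses  # path assumed; varies by library version
from privacy.bolt_on import models

n_samples = 10  # illustrative: samples per class
input_dim = 2   # illustrative: feature dimensionality
n_outputs = 1   # illustrative: one output neuron for binary classification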
x_stack = [tf.constant(-1, tf.float32, (n_samples, input_dim)),
           tf.constant(1, tf.float32, (n_samples, input_dim))]
y_stack = [tf.constant(0, tf.float32, (n_samples, 1)),
           tf.constant(1, tf.float32, (n_samples, 1))]
x, y = tf.concat(x_stack, 0), tf.concat(y_stack, 0)
print(x.shape, y.shape)
generator = tf.data.Dataset.from_tensor_slices((x, y))
generator = generator.batch(10)
generator = generator.shuffle(10)
# -------
# First, we will explore using the pre-built BoltOnModel, which is a thin
# wrapper around a Keras Model using a single-layer neural network.
# It automatically uses the BoltOn Optimizer, which encompasses all the logic
# required for the BoltOn Differential Privacy method.
# -------
bolt = models.BoltOnModel(n_outputs)  # tell the model how many outputs we have.
# -------
# Now, we will pick our optimizer and Strongly Convex Loss function. The loss
# must extend from StrongConvexMixin and implement the associated methods. Some
# existing loss functions are pre-implemented in bolt_on.loss.
# -------
optimizer = tf.optimizers.SGD()
reg_lambda = 1
C = 1
radius_constant = 1
loss = losses.StrongConvexBinaryCrossentropy(reg_lambda, C, radius_constant)
# -------
# For simplicity, we pick all parameters of the StrongConvexBinaryCrossentropy
# to be 1; these are all tunable and their impact can be read in
# losses.StrongConvexBinaryCrossentropy. We then compile the model with the
# chosen optimizer and loss, which will automatically wrap the chosen
# optimizer with the BoltOn optimizer.
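The excerpt ends at the compile step; a hedged sketch of how the walkthrough would plausibly continue, with argument values that are illustrative rather than taken from the original tutorial:

bolt.compile(optimizer, loss)
# Fitting mirrors a regular Keras fit call, plus the BoltOn privacy
# arguments seen in _do_fit above (noise distribution, epsilon, and the
# total number of samples).
bolt.fit(x,
         y,
         epsilon=2,                     # illustrative privacy budget
         noise_distribution='laplace',  # as in _do_fit above
         batch_size=20,                 # illustrative
         n_samples=n_samples * 2)       # both classes combined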