Example #1
    def testCallOnPlaceHolder(self):
        inputs = tf.compat.v1.placeholder(dtype=tf.float32)
        dense = core_layers.Dense(4, name='my_dense')
        with self.assertRaises(ValueError):
            dense(inputs)

        inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None])
        dense = core_layers.Dense(4, name='my_dense')
        with self.assertRaises(ValueError):
            dense(inputs)

        inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                          shape=[None, None, None])
        dense = core_layers.Dense(4, name='my_dense')
        with self.assertRaises(ValueError):
            dense(inputs)

        inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3])
        dense = core_layers.Dense(4, name='my_dense')
        dense(inputs)

        inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                          shape=[None, None, 3])
        dense = core_layers.Dense(4, name='my_dense')
        dense(inputs)
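The three failing calls above share one cause: Dense creates its kernel from the input's last dimension, so that dimension must be statically known before the layer can build. A minimal sketch of the rule, using the public tf.keras.layers.Dense (assumed to behave like core_layers.Dense here):

import tensorflow as tf

layer = tf.keras.layers.Dense(4)
# Last dimension known: the layer can build its (3, 4) kernel.
layer.build((None, 3))
print(layer.kernel.shape)  # (3, 4)

# Last dimension unknown: building raises a ValueError, as asserted above.
try:
    tf.keras.layers.Dense(4).build((None, None))
except ValueError as e:
    print(e)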
Example #2
  def test_count_params(self):
    dense = core_tf_layers.Dense(16)
    dense.build((None, 4))
    self.assertEqual(dense.count_params(), 16 * 4 + 16)

    dense = core_tf_layers.Dense(16)
    with self.assertRaises(ValueError):
      dense.count_params()
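The expected count follows directly from the layer's two weights: a kernel of shape (input_dim, units) and a bias of shape (units,). A quick hand-check with the public Keras layer (assumed equivalent for counting):

import tensorflow as tf

dense = tf.keras.layers.Dense(16)
dense.build((None, 4))
print(dense.count_params())  # 80 == 4 * 16 (kernel) + 16 (bias)

The second assertion holds because the kernel size is unknown before build, so calling count_params on an unbuilt layer raises a ValueError.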
Example #3
  def testActivation(self):
    dense = core_layers.Dense(2, activation=tf.nn.relu, name='dense1')
    inputs = tf.random.uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if not tf.executing_eagerly():
      self.assertEqual(outputs.op.name, 'dense1/Relu')

    dense = core_layers.Dense(2, name='dense2')
    inputs = tf.random.uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if not tf.executing_eagerly():
      self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
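Note that the op-name assertions are guarded: they only hold in graph mode, where the last op in the layer's name scope is the activation (Relu) when one is set, or the bias add (BiasAdd) otherwise. In eager execution there is no graph, so outputs has no stable op name to inspect.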
Example #4
 def testVariableInput(self):
   with self.cached_session():
     v = tf.compat.v1.get_variable(
         'X', initializer=tf.compat.v1.zeros_initializer(), shape=(1, 1))
     x = core_layers.Dense(1)(v)
     self.evaluate(tf.compat.v1.global_variables_initializer())
     self.assertAllEqual(x, [[0.0]])
Example #5
  def testDenseProperties(self):
    dense = core_layers.Dense(2, activation=tf.nn.relu, name='my_dense')
    self.assertEqual(dense.units, 2)
    self.assertEqual(dense.activation, tf.nn.relu)
    self.assertEqual(dense.kernel_regularizer, None)
    self.assertEqual(dense.bias_regularizer, None)
    self.assertEqual(dense.activity_regularizer, None)
    self.assertEqual(dense.use_bias, True)

    # Test auto-naming
    dense = core_layers.Dense(2, activation=tf.nn.relu)
    dense(tf.random.uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_1')
    dense = core_layers.Dense(2, activation=tf.nn.relu)
    dense(tf.random.uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_2')
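Auto-naming gives each unnamed layer a uniquified default; the legacy tf.layers counter is not zero-based, so the first unnamed Dense in a graph is 'dense_1' and the next 'dense_2'. (The Keras equivalent names its first instance plain 'dense' instead.) A small sketch with the public layer, assuming a fresh program:

import tensorflow as tf

a = tf.keras.layers.Dense(2)
b = tf.keras.layers.Dense(2)
print(a.name, b.name)  # 'dense', then 'dense_1'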
Example #6
def minimize_loss_example(optimizer, use_bias=False, use_callable_loss=True):
    """Example of non-distribution-aware legacy code."""
    def dataset_fn():
        dataset = tf.data.Dataset.from_tensors([[1.0]]).repeat()
        # TODO(isaprykin): batch with drop_remainder causes shapes to be
        # fully defined for TPU.  Remove this when XLA supports dynamic shapes.
        return dataset.batch(1, drop_remainder=True)

    layer = core.Dense(1, use_bias=use_bias)

    def model_fn(x):
        """A very simple model written by the user."""
        def loss_fn():
            y = tf.reshape(layer(x), []) - tf.constant(1.0)
            return y * y

        if isinstance(optimizer, optimizer_v2.OptimizerV2):
            return optimizer.minimize(loss_fn,
                                      lambda: layer.trainable_variables)
        elif use_callable_loss:
            return optimizer.minimize(loss_fn)
        else:
            return optimizer.minimize(loss_fn())

    return model_fn, dataset_fn, layer
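The helper returns the model and dataset builders plus the layer itself so a test harness can wire them into a distribution strategy. Note that the OptimizerV2 branch passes the variable list as a callable: the layer's variables do not exist until its first call, so they must be looked up lazily at minimize time.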
Example #7
 def testActivityRegularizer(self):
   regularizer = lambda x: tf.reduce_sum(x) * 1e-3
   dense = core_layers.Dense(
       2, name='my_dense', activity_regularizer=regularizer)
   inputs = tf.random.uniform((5, 3), seed=1)
   _ = dense(inputs)
   loss_keys = tf.compat.v1.get_collection(
       tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
   self.assertEqual(len(loss_keys), 1)
   self.assertListEqual(dense.losses, loss_keys)
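Unlike kernel or bias regularizers, an activity regularizer penalizes the layer's outputs, so a fresh loss tensor is produced on every call. A minimal sketch with the public Keras layer (assumed to track losses the same way):

import tensorflow as tf

dense = tf.keras.layers.Dense(
    2, activity_regularizer=lambda x: 1e-3 * tf.reduce_sum(x))
_ = dense(tf.ones((5, 3)))
print(dense.losses)  # one scalar tensor: 1e-3 * sum of the outputs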
Example #8
 def testBiasRegularizer(self):
   regularizer = lambda x: tf.reduce_sum(x) * 1e-3
   dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
   inputs = tf.random.uniform((5, 3), seed=1)
   _ = dense(inputs)
   loss_keys = tf.compat.v1.get_collection(
       tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
   self.assertEqual(len(loss_keys), 1)
   self.evaluate([v.initializer for v in dense.variables])
   self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
Example #9
 def testConstraints(self):
   k_constraint = lambda x: x / tf.reduce_sum(x)
   b_constraint = lambda x: x / tf.reduce_max(x)
   dense = core_layers.Dense(2,
                             kernel_constraint=k_constraint,
                             bias_constraint=b_constraint)
   inputs = tf.random.uniform((5, 3), seed=1)
   dense(inputs)
   self.assertEqual(dense.kernel_constraint, k_constraint)
   self.assertEqual(dense.bias_constraint, b_constraint)
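Constraints are not applied inside the forward pass; they are projection functions that an optimizer (or the user) applies to the weights after each update, which is why the test only checks that they are stored. A hand-applied sketch, using the public Keras layer:

import tensorflow as tf

k_constraint = lambda x: x / tf.reduce_sum(x)
dense = tf.keras.layers.Dense(2, kernel_constraint=k_constraint)
dense.build((None, 3))
# Project the kernel the way an optimizer would after an update step:
dense.kernel.assign(dense.kernel_constraint(dense.kernel))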
Example #10
 def testNonTrainable(self):
   dense = core_layers.Dense(2, trainable=False, name='my_dense')
   inputs = tf.random.uniform((5, 2), seed=1)
   _ = dense(inputs)
   self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
   self.assertListEqual(dense.non_trainable_variables,
                        [dense.kernel, dense.bias])
   self.assertListEqual(dense.trainable_variables, [])
   if not tf.executing_eagerly():
     self.assertEqual(
         len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)), 0)
Example #11
  def testOutputShape(self):
    dense = core_layers.Dense(7, activation=tf.nn.relu, name='my_dense')
    inputs = tf.random.uniform((5, 3), seed=1)
    outputs = dense(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 7])

    inputs = tf.random.uniform((5, 2, 3), seed=1)
    outputs = dense(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])

    inputs = tf.random.uniform((1, 2, 4, 3), seed=1)
    outputs = dense(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
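The pattern generalizes: Dense only transforms the last axis, so the output shape is the input shape with its final dimension replaced by units. A compact check with the public layer (assumed equivalent):

import tensorflow as tf

dense = tf.keras.layers.Dense(7)
for shape in [(5, 3), (5, 2, 3), (1, 2, 4, 3)]:
    outputs = dense(tf.random.uniform(shape))
    assert outputs.shape == shape[:-1] + (7,)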
Example #12
 def testCall(self):
   dense = core_layers.Dense(2, activation=tf.nn.relu, name='my_dense')
   inputs = tf.random.uniform((5, 4), seed=1)
   outputs = dense(inputs)
   self.assertListEqual([5, 2], outputs.get_shape().as_list())
   self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
   self.assertListEqual(dense.trainable_variables,
                        [dense.kernel, dense.bias])
   self.assertListEqual(dense.non_trainable_variables, [])
   if not tf.executing_eagerly():
     self.assertEqual(
         len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)), 2)
   self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
   self.assertEqual(dense.bias.name, 'my_dense/bias:0')
Example #13
 def testNoBias(self):
     dense = core_layers.Dense(2, use_bias=False, name="my_dense")
     inputs = tf.random.uniform((5, 2), seed=1)
     _ = dense(inputs)
     self.assertListEqual(dense.variables, [dense.kernel])
     self.assertListEqual(dense.trainable_variables, [dense.kernel])
     self.assertListEqual(dense.non_trainable_variables, [])
     if not tf.executing_eagerly():
         self.assertEqual(
             len(
                 tf.compat.v1.get_collection(
                     tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)),
             1,
         )
     self.assertEqual(dense.kernel.name, "my_dense/kernel:0")
     self.assertEqual(dense.bias, None)
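With use_bias=False the layer reduces to a plain matrix multiply against its kernel, which is why only one trainable variable is created. A quick equivalence check with the public layer:

import tensorflow as tf

dense = tf.keras.layers.Dense(2, use_bias=False)
x = tf.random.uniform((5, 2))
y = dense(x)
tf.debugging.assert_near(y, tf.matmul(x, dense.kernel))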
Example #14
 def testComputeOutputShape(self):
     dense = core_layers.Dense(2, activation=tf.nn.relu, name='dense1')
     ts = tf.TensorShape
     # pylint: disable=protected-access
     with self.assertRaises(ValueError):
         dense.compute_output_shape(ts(None))
     with self.assertRaises(ValueError):
         dense.compute_output_shape(ts([]))
     with self.assertRaises(ValueError):
         dense.compute_output_shape(ts([1]))
     self.assertEqual([None, 2],
                      dense.compute_output_shape((None, 3)).as_list())
     self.assertEqual([None, 2],
                      dense.compute_output_shape(ts([None, 3])).as_list())
     self.assertEqual([None, 4, 2],
                      dense.compute_output_shape(ts([None, 4,
                                                     3])).as_list())
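In short, compute_output_shape mirrors the runtime rule seen in the shape tests above: any input shape of rank 2 or higher maps to the same shape with the last dimension replaced by the unit count, while rank-0 and rank-1 shapes are rejected with a ValueError.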
Example #15
def batchnorm_example(
    optimizer_fn,
    batch_per_epoch=1,
    momentum=0.9,
    renorm=False,
    update_ops_in_replica_mode=False,
):
    """Example of non-distribution-aware legacy code with batch
    normalization."""
    def dataset_fn():
        # input shape is [16, 8], input values are increasing in both
        # dimensions.
        return tf.data.Dataset.from_tensor_slices(
            [[[float(x * 8 + y + z * 100) for y in range(8)]
              for x in range(16)] for z in range(batch_per_epoch)]).repeat()

    optimizer = optimizer_fn()
    batchnorm = normalization.BatchNormalization(renorm=renorm,
                                                 momentum=momentum,
                                                 fused=False)
    layer = core.Dense(1, use_bias=False)

    def model_fn(x):
        """A model that uses batchnorm."""
        def loss_fn():
            y = batchnorm(x, training=True)
            update_ops = (
                tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
                if update_ops_in_replica_mode else [])
            with tf.control_dependencies(update_ops):
                loss = tf.reduce_mean(
                    tf.reduce_sum(layer(y)) - tf.constant(1.0))
            # `x` and `y` will be fetched by the gradient computation, but not
            # `loss`.
            return loss

        if isinstance(optimizer, optimizer_v2.OptimizerV2):
            return optimizer.minimize(loss_fn,
                                      lambda: layer.trainable_variables)

        # Callable loss.
        return optimizer.minimize(loss_fn)

    return model_fn, dataset_fn, batchnorm
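Here the update_ops_in_replica_mode flag controls where the batch-norm moving-average updates run: when set, the UPDATE_OPS collection is attached as a control dependency of the loss, forcing the updates to execute inside each replica; when unset, the test harness is expected to collect and run them in a separate cross-replica step.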
Example #16
 def testNoEagerLeak(self):
   # Tests that repeatedly constructing and building a Layer does not leak
   # Python objects.
   inputs = tf.random.uniform((5, 4), seed=1)
   core_layers.Dense(5)(inputs)
   core_layers.Dense(2, activation=tf.nn.relu, name='my_dense')(inputs)
Example #17
 def testCallTensorDot(self):
   dense = core_layers.Dense(2, activation=tf.nn.relu, name='my_dense')
   inputs = tf.random.uniform((5, 4, 3), seed=1)
   outputs = dense(inputs)
   self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
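As the test name suggests, inputs of rank greater than 2 are handled with a tensordot that contracts the input's last axis against the kernel's first. A sketch of the equivalence with the public layer (activation omitted so the raw product is comparable):

import tensorflow as tf

dense = tf.keras.layers.Dense(2)
x = tf.random.uniform((5, 4, 3))
y = dense(x)
manual = tf.tensordot(x, dense.kernel, axes=[[2], [0]]) + dense.bias
tf.debugging.assert_near(y, manual)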