Example #1
 def test_axes_initialization(self):
     input_spec.InputSpec(shape=[1, None, 2, 3], axes={3: 5, '2': 2})
     with self.assertRaisesRegex(ValueError, 'Axis 4 is greater than'):
         input_spec.InputSpec(shape=[1, None, 2, 3], axes={4: 5})
     with self.assertRaisesRegex(TypeError,
                                 'Argument `axes` must be a dict'):
         input_spec.InputSpec(shape=[1, None, 2, 3], axes={'string': 5})
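
The test above exercises the validation rules for axes: keys must be integers (or strings that parse as integers) and must index into the declared shape; anything else is rejected. Below is a minimal sketch of the same rules through the public tf.keras.layers.InputSpec API, which is assumed here to mirror the internal input_spec module used in these tests:

from tensorflow.keras.layers import InputSpec

# Valid: key 3 indexes into the 4-axis shape, and '2' parses as an integer key.
InputSpec(shape=[1, None, 2, 3], axes={3: 5, '2': 2})

# Out of range: axis 4 does not exist in a 4-axis shape -> ValueError.
# InputSpec(shape=[1, None, 2, 3], axes={4: 5})

# Not int-like: a non-numeric string key -> TypeError.
# InputSpec(shape=[1, None, 2, 3], axes={'string': 5})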
Example #2
    def test_undefined_shapes(self):
        spec = input_spec.InputSpec(max_ndim=5)
        with self.assertRaisesRegex(ValueError, 'unknown TensorShape'):
            input_spec.to_tensor_shape(spec).as_list()

        spec = input_spec.InputSpec(min_ndim=5, max_ndim=5)
        with self.assertRaisesRegex(ValueError, 'unknown TensorShape'):
            input_spec.to_tensor_shape(spec).as_list()
Example #3
    def test_defined_ndims(self):
        spec = input_spec.InputSpec(ndim=5)
        self.assertAllEqual([None] * 5,
                            input_spec.to_tensor_shape(spec).as_list())

        spec = input_spec.InputSpec(ndim=0)
        self.assertAllEqual([], input_spec.to_tensor_shape(spec).as_list())

        spec = input_spec.InputSpec(ndim=3, axes={1: 3, -1: 2})
        self.assertAllEqual([None, 3, 2],
                            input_spec.to_tensor_shape(spec).as_list())
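
Beyond converting a spec back into a TensorShape, the same ndim/axes arguments are what layers attach as self.input_spec so that Keras validates inputs on every call. A minimal sketch using the public API; the layer name RankThree is made up for illustration:

import tensorflow as tf

class RankThree(tf.keras.layers.Layer):
    def __init__(self):
        super().__init__()
        # Same spec as the last case above: rank 3, axis 1 must be 3,
        # and the last axis must be 2.
        self.input_spec = tf.keras.layers.InputSpec(ndim=3, axes={1: 3, -1: 2})

    def call(self, inputs):
        return inputs

layer = RankThree()
layer(tf.ones((4, 3, 2)))     # OK: matches the spec.
# layer(tf.ones((4, 3, 5)))  # ValueError: axis -1 expected to have value 2.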
Example #4
    def test_input_spec_dtype(self):
        # Test that the InputSpec's dtype is compared against the inputs
        # before the layer casts them, not after.
        layer = mp_test_util.MultiplyLayer(dtype='float64')
        layer.input_spec = input_spec.InputSpec(dtype='float16')

        # Test passing Eager tensors
        x = tf.ones((2, 2), dtype='float16')
        layer(x)
        x = tf.ones((2, 2), dtype='float64')
        with self.assertRaisesRegex(
                ValueError, 'expected dtype=float16, found dtype=.*float64'):
            layer(x)

        # Test passing symbolic tensors
        x = layers.Input((2, ), dtype='float16')
        y = layer(x)
        model = models.Model(x, y)
        model(tf.ones((2, 2)))

        x = layers.Input((2, ), dtype='float64')
        with self.assertRaisesRegex(
                ValueError, 'expected dtype=float16, found dtype=.*float64'):
            # In TF2, the error is only raised when the model is run
            y = layer(x)
            model = models.Model(x, y)
            model(tf.ones((2, 2)))
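
The behavior under test, reduced to a standalone sketch: the spec's dtype is checked against the tensor as it arrives, before the layer's autocasting runs, so a float64 layer can still insist on float16 inputs. The class name Float16Only is hypothetical:

import tensorflow as tf

class Float16Only(tf.keras.layers.Layer):
    def __init__(self):
        super().__init__(dtype='float64')
        # Compared against the raw input, before the cast to float64.
        self.input_spec = tf.keras.layers.InputSpec(dtype='float16')

    def call(self, inputs):
        return 2.0 * inputs

layer = Float16Only()
layer(tf.ones((2, 2), dtype='float16'))     # passes the spec, then is cast
# layer(tf.ones((2, 2), dtype='float64'))  # ValueError: expected dtype=float16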
Example #5
    def build(self, input_shape):
        if isinstance(input_shape, dict):
            names = sorted(list(input_shape.keys()))
            self.input_specs = []
            self.dense_layers = []
            for name in names:
                shape = input_shape[name]
                layer = core.Dense(
                    units=self.units,
                    use_bias=False,
                    kernel_initializer=self.kernel_initializer,
                    kernel_regularizer=self.kernel_regularizer,
                    name=name,
                )
                layer.build(shape)
                self.input_specs.append(
                    input_spec.InputSpec(shape=shape, name=name)
                )
                self.dense_layers.append(layer)
        elif isinstance(input_shape, (tuple, list)) and all(
            isinstance(shape, tf.TensorShape) for shape in input_shape
        ):
            self.dense_layers = []
            for shape in input_shape:
                layer = core.Dense(
                    units=self.units,
                    use_bias=False,
                    kernel_initializer=self.kernel_initializer,
                    kernel_regularizer=self.kernel_regularizer,
                )
                layer.build(shape)
                self.dense_layers.append(layer)
        else:
            # input_shape can be a single TensorShape or a tuple of ints.
            layer = core.Dense(
                units=self.units,
                use_bias=False,
                kernel_initializer=self.kernel_initializer,
                kernel_regularizer=self.kernel_regularizer,
            )
            layer.build(input_shape)
            self.dense_layers = [layer]

        if self.use_bias:
            self.bias = self.add_weight(
                "bias",
                shape=(self.units,),  # the bias is a vector of length `units`
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                dtype=self.dtype,
                trainable=True,
            )
        else:
            self.bias = None
        self.built = True
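
When a layer is called on a dict of tensors, Keras hands the matching dict of TensorShapes to build, which is what the first branch above handles. A condensed, hypothetical layer (PerKeyDense is not from the source) showing that plumbing end to end:

import tensorflow as tf

class PerKeyDense(tf.keras.layers.Layer):
    def __init__(self, units):
        super().__init__()
        self.units = units

    def build(self, input_shape):
        # input_shape arrives as a dict of TensorShapes, keyed like the inputs.
        self.dense_layers = {
            name: tf.keras.layers.Dense(self.units, use_bias=False)
            for name in sorted(input_shape)
        }

    def call(self, inputs):
        return tf.add_n(
            [layer(inputs[name]) for name, layer in self.dense_layers.items()])

out = PerKeyDense(3)({'a': tf.ones((8, 4)), 'b': tf.ones((8, 6))})
print(out.shape)  # (8, 3)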
Example #6
    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        # TODO(pmol): Allow higher dimension inputs. Currently the input is expected
        # to have shape [batch_size, dimension].
        if input_shape.rank != 2:
            raise ValueError(
                "The rank of the input tensor should be 2. "
                f"Received input with rank {input_shape.ndims} instead. "
                f"Full input shape received: {input_shape}")
        if input_shape.dims[1].value is None:
            raise ValueError(
                "The last dimension of the input tensor should be defined. "
                f"Found `None`. Full input shape received: {input_shape}")
        self.input_spec = input_spec.InputSpec(
            ndim=2, axes={1: input_shape.dims[1].value})
        input_dim = input_shape.dims[1].value

        kernel_initializer = _get_random_features_initializer(
            self.kernel_initializer, shape=(input_dim, self.output_dim))

        self.unscaled_kernel = self.add_weight(
            name="unscaled_kernel",
            shape=(input_dim, self.output_dim),
            dtype=tf.float32,
            initializer=kernel_initializer,
            trainable=False,
        )

        self.bias = self.add_weight(
            name="bias",
            shape=(self.output_dim, ),
            dtype=tf.float32,
            initializer=initializers.RandomUniform(minval=0.0,
                                                   maxval=2 * np.pi),
            trainable=False,
        )

        if self.scale is None:
            self.scale = _get_default_scale(self.kernel_initializer, input_dim)
        self.kernel_scale = self.add_weight(
            name="kernel_scale",
            shape=(1, ),
            dtype=tf.float32,
            initializer=tf.compat.v1.constant_initializer(self.scale),
            trainable=True,
            constraint="NonNeg",
        )
        super().build(input_shape)
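
For context, this build method belongs to Keras's random Fourier features layer, exposed publicly as tf.keras.layers.experimental.RandomFourierFeatures. A short usage sketch consistent with the constraints enforced above, namely rank-2 input with a statically known last dimension:

import tensorflow as tf

rff = tf.keras.layers.experimental.RandomFourierFeatures(output_dim=64)
features = rff(tf.ones((2, 8)))  # rank 2, static last dim: accepted by build()
print(features.shape)            # (2, 64)
# rff(tf.ones((2, 8, 1)))       # rank 3 is rejected -> ValueError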
Example #7
    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        # TODO(pmol): Allow higher dimension inputs. Currently the input is expected
        # to have shape [batch_size, dimension].
        if input_shape.rank != 2:
            raise ValueError(
                'The rank of the input tensor should be 2. '
                'Got {} instead.'.format(input_shape.ndims))
        if input_shape.dims[1].value is None:
            raise ValueError(
                'The last dimension of the inputs to `RandomFourierFeatures` '
                'should be defined. Found `None`.')
        self.input_spec = input_spec.InputSpec(
            ndim=2, axes={1: input_shape.dims[1].value})
        input_dim = input_shape.dims[1].value

        kernel_initializer = _get_random_features_initializer(
            self.kernel_initializer, shape=(input_dim, self.output_dim))

        self.unscaled_kernel = self.add_weight(
            name='unscaled_kernel',
            shape=(input_dim, self.output_dim),
            dtype=tf.float32,
            initializer=kernel_initializer,
            trainable=False)

        self.bias = self.add_weight(
            name='bias',
            shape=(self.output_dim, ),
            dtype=tf.float32,
            initializer=tf.compat.v1.random_uniform_initializer(
                minval=0.0, maxval=2 * np.pi, dtype=tf.float32),
            trainable=False)

        if self.scale is None:
            self.scale = _get_default_scale(self.kernel_initializer, input_dim)
        self.kernel_scale = self.add_weight(
            name='kernel_scale',
            shape=(1, ),
            dtype=tf.float32,
            initializer=tf.compat.v1.constant_initializer(self.scale),
            trainable=True,
            constraint='NonNeg')
        super(RandomFourierFeatures, self).build(input_shape)
Example #8
    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        if input_channel % self.groups != 0:
            raise ValueError(
                'The number of input channels must be evenly divisible by the number '
                'of groups. Received groups={}, but the input has {} channels '
                '(full input shape is {}).'.format(self.groups, input_channel,
                                                   input_shape))
        kernel_shape = self.kernel_size + (input_channel // self.groups,
                                           self.filters)

        # compute_output_shape contains some validation logic for the input
        # shape, and makes sure the output shape has all positive dimensions.
        self.compute_output_shape(input_shape)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      layout=self.kernel_layout,
                                      trainable=True,
                                      dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        layout=self.bias_layout,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = input_spec.InputSpec(
            min_ndim=self.rank + 2, axes={channel_axis: input_channel})
        self.built = True
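
A consequence of the InputSpec installed at the end of build: once the layer has seen its first input, the channel axis is pinned, so a later call with a different channel count is rejected instead of silently rebuilding the kernel. A minimal sketch with the stock Conv2D:

import tensorflow as tf

conv = tf.keras.layers.Conv2D(filters=4, kernel_size=3)
conv(tf.ones((1, 8, 8, 3)))       # build() pins the channel axis to 3
try:
    conv(tf.ones((1, 8, 8, 5)))   # 5 channels != 3 -> ValueError
except ValueError as e:
    print(e)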
Example #9
    def build(self, input_shape):
        dtype = tf.as_dtype(self.dtype or backend.floatx())
        if not (dtype.is_floating or dtype.is_complex):
            raise TypeError(
                'A Dense layer can only be built with a floating-point '
                f'dtype. Received: dtype={dtype}')

        input_shape = tf.TensorShape(input_shape)
        last_dim = tf.compat.dimension_value(input_shape[-1])
        if last_dim is None:
            raise ValueError(
                'The last dimension of the inputs to a Dense layer '
                'should be defined. Found None. '
                f'Full input shape received: {input_shape}')
        self.input_spec = input_spec.InputSpec(min_ndim=2, axes={-1: last_dim})
        self.kernel = self.add_weight('kernel',
                                      shape=[last_dim, self.units],
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      layout=self.kernel_layout,
                                      dtype=self.dtype,
                                      trainable=True)
        if self.use_bias:
            self.bias = self.add_weight('bias',
                                        shape=[self.units],
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        layout=self.bias_layout,
                                        dtype=self.dtype,
                                        trainable=True)
        else:
            self.bias = None
        self.built = True
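
The InputSpec(min_ndim=2, axes={-1: last_dim}) set here is why Dense accepts inputs of any rank at or above 2 while still locking the feature dimension after the first call. A minimal sketch:

import tensorflow as tf

dense = tf.keras.layers.Dense(4)
dense(tf.ones((2, 3)))       # first call: spec becomes min_ndim=2, axes={-1: 3}
dense(tf.ones((5, 2, 3)))    # OK: min_ndim=2 permits higher ranks too
# dense(tf.ones((2, 5)))    # ValueError: the last dimension must stay 3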
Example #10
 def __init__(self):
     super().__init__()
     self.input_spec = input_spec.InputSpec(dtype="float32")
Example #11
 def __init__(self):
     super().__init__()
     self.input_spec = input_spec.InputSpec(axes={-1: 2})
Example #12
 def __init__(self):
     super().__init__()
     self.input_spec = input_spec.InputSpec(shape=(None, 3))
Example #13
 def test_defined_shape(self):
     spec = input_spec.InputSpec(shape=[1, None, 2, 3])
     self.assertAllEqual([1, None, 2, 3],
                         input_spec.to_tensor_shape(spec).as_list())
Example #14
 def __init__(self):
   super(CustomerLayer, self).__init__()
   self.input_spec = input_spec.InputSpec(max_ndim=2)
Example #15
 def __init__(self):
   super(CustomerLayer, self).__init__()
   self.input_spec = input_spec.InputSpec(dtype='float32')
Example #16
    def test_model(
        self,
        strategy_fn,
        use_operator=False,
        use_regularizer=False,
        policy_name="mixed_float16",
        get_config=False,
        save_format=None,
        use_input_spec=False,
    ):
        self._skip_if_strategy_unsupported(strategy_fn)
        self._skip_if_save_format_unsupported(save_format)
        if use_regularizer:
            weight_regularizer = mp_test_util.IdentityRegularizer()
            activity_regularizer = mp_test_util.ReduceSumRegularizer()
        else:
            weight_regularizer = activity_regularizer = None
        with strategy_fn().scope():
            with policy.policy_scope(policy_name):
                layer = mp_test_util.MultiplyLayer(
                    assert_type=tf.float16,
                    use_operator=use_operator,
                    regularizer=weight_regularizer,
                    activity_regularizer=activity_regularizer,
                    input_shape=(1,),
                )
                if use_input_spec:
                    layer.input_spec = input_spec.InputSpec(shape=(None, 1))
                model = test_utils.get_model_from_layers(
                    [layer], input_shape=(1,), input_dtype=tf.float16
                )
                if get_config:
                    config = model.get_config()
                    model = model.__class__.from_config(
                        config,
                        custom_objects={
                            "MultiplyLayer": mp_test_util.MultiplyLayer
                        },
                    )
                    (layer,) = (
                        layer
                        for layer in model.layers
                        if isinstance(layer, mp_test_util.MultiplyLayer)
                    )

                def loss_fn(y_true, y_pred):
                    del y_true
                    return tf.reduce_mean(y_pred)

                # The learning rate is small enough that, if it were applied
                # to a float16 variable, the variable would not change. So
                # this tests that the learning rate is applied to the float32
                # variable rather than to a float16 value.
                opt = gradient_descent.SGD(2**-14)
                # Use a fixed loss scale, as this test will fail if gradients are
                # skipped for a step due to dynamic loss scaling.
                opt = loss_scale_optimizer.LossScaleOptimizer(
                    opt, dynamic=False, initial_scale=8
                )
                model.compile(
                    opt,
                    loss=loss_fn,
                    run_eagerly=test_utils.should_run_eagerly(),
                )

        x = np.ones((2, 1))
        y = np.ones((2, 1))
        dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
        model.fit(dataset)
        # The variable starts at 1 and should have the gradient of 2 ** -14
        # subtracted from it.
        expected = 1 - 2**-14
        if use_regularizer:
            # Weight and activity regularizer each add another 2 ** -14 to the
            # gradient.
            expected -= 2 * 2**-14
        self.assertEqual(backend.eval(layer.v), expected)

        if save_format:
            with generic_utils.CustomObjectScope(
                {
                    "MultiplyLayer": mp_test_util.MultiplyLayer,
                    "loss_fn": loss_fn,
                }
            ):
                self._test_saving(model, dataset, save_format, use_regularizer)
Example #17
 def __init__(self):
     super().__init__()
     self.input_spec = input_spec.InputSpec(max_ndim=2)
Example #18
 def __init__(self):
   super(CustomerLayer, self).__init__()
   self.input_spec = input_spec.InputSpec(shape=(None, 3))
Example #19
 def __init__(self):
   super(CustomerLayer, self).__init__()
   self.input_spec = input_spec.InputSpec(axes={-1: 2})