Example #1
    def build(self, input_shape):
        super(Svdf, self).build(input_shape)

        if self.mode == modes.Modes.TRAINING:
            self.dropout1 = non_scaling_dropout.NonScalingDropout(self.dropout)
        else:
            self.dropout1 = tf.keras.layers.Lambda(lambda x, training: x)
        self.dense1 = tf.keras.layers.Dense(units=self.units1,
                                            use_bias=self.use_bias1)
        self.depth_cnn1 = stream.Stream(
            cell=tf.keras.layers.DepthwiseConv2D(kernel_size=(self.memory_size,
                                                              1),
                                                 strides=(1, 1),
                                                 padding='valid',
                                                 dilation_rate=(1, 1),
                                                 use_bias=self.use_bias),
            inference_batch_size=self.inference_batch_size,
            mode=self.mode,
            use_one_step=False,
            pad_time_dim=self.pad)
        if self.units2 > 0:
            self.dense2 = tf.keras.layers.Dense(units=self.units2,
                                                use_bias=True)
        else:
            self.dense2 = tf.keras.layers.Lambda(lambda x, training: x)

        if self.use_batch_norm:
            self.batch_norm = tf.keras.layers.BatchNormalization(
                scale=self.bn_scale)
        else:
            self.batch_norm = tf.keras.layers.Lambda(lambda x, training: x)
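
A note on the identity branches above: TF 2.x's tf.keras.layers.Lambda inspects the wrapped function and forwards the `training` argument when the function declares it, so the Lambda fallbacks can be called with the same layer(x, training=...) signature as the real dropout and batch-norm layers. A minimal sketch of that pattern (the names here are illustrative, not from the original code):

import tensorflow as tf

# The wrapped function declares `training`, so Lambda forwards the kwarg;
# both branches then share a uniform layer(x, training=...) call path.
identity = tf.keras.layers.Lambda(lambda x, training: x)
x = tf.ones([2, 3])
y = identity(x, training=True)  # y equals x, unchanged
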
Example #2
  def build(self, input_shape):
    super(Svdf, self).build(input_shape)

    if self.mode == modes.Modes.TRAINING:
      self.dropout1 = non_scaling_dropout.NonScalingDropout(
          self.dropout, training=True)
    else:
      self.dropout1 = tf.keras.layers.Lambda(lambda x: x)
    self.dense1 = tf.keras.layers.Dense(
        units=self.units1, use_bias=self.use_bias1)
    self.depth_cnn1 = depthwiseconv1d.DepthwiseConv1D(
        memory_size=self.memory_size,
        inference_batch_size=self.inference_batch_size,
        use_bias=self.use_bias,
        mode=self.mode,
        pad=self.pad)
    if self.units2 > 0:
      self.dense2 = tf.keras.layers.Dense(units=self.units2, use_bias=True)
    else:
      self.dense2 = tf.keras.layers.Lambda(lambda x: x)

    if self.use_batch_norm:
      self.batch_norm = tf.keras.layers.BatchNormalization(scale=self.bn_scale)
    else:
      self.batch_norm = tf.keras.layers.Lambda(lambda x: x)
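
The NonScalingDropout layer these examples depend on is defined elsewhere in the package and is not shown here. As a rough sketch of the behavior the tests below exercise (dropout that zeroes units but skips the usual 1/(1 - rate) rescaling of the survivors), assuming a plain Keras layer taking rate, noise_shape, and seed:

import tensorflow as tf

class NonScalingDropoutSketch(tf.keras.layers.Layer):
  """Zeroes units with probability `rate` during training but, unlike
  standard dropout, does not rescale the kept units by 1 / (1 - rate)."""

  def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
    super().__init__(**kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed

  def call(self, inputs, training=None):
    if not training or self.rate == 0.0:
      return inputs  # identity at inference time or when nothing is dropped
    noise_shape = self.noise_shape
    if noise_shape is None:
      noise_shape = tf.shape(inputs)  # infer the mask shape from the input
    keep_prob = 1.0 - self.rate
    # floor(keep_prob + U[0, 1)) is 1 with probability keep_prob, else 0.
    mask = tf.floor(keep_prob + tf.random.uniform(
        noise_shape, seed=self.seed, dtype=inputs.dtype))
    return inputs * mask
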
Example #3
    def test_non_scaling_dropout_noise_shape_inference(self):
        training = True
        keep_prob = 0.5
        layer = non_scaling_dropout.NonScalingDropout(rate=(1 - keep_prob),
                                                      seed=self.seed)
        layer(self.test_inputs, training=training)

        # When no noise_shape is given, check that it is inferred from the input's shape
        self.assertAllEqual(self.test_inputs.shape,
                            self.sess.run(layer.noise_shape))
Example #4
    def test_non_scaling_dropout_keep_prob_is_one(self):
        training = True
        keep_prob = 1
        input_shape = self.test_inputs.shape
        noise_shape = [input_shape[0], 1, input_shape[2]]

        # Keras implementation
        layer = non_scaling_dropout.NonScalingDropout(rate=(1 - keep_prob),
                                                      noise_shape=noise_shape,
                                                      seed=self.seed)
        output = layer(self.test_inputs, training=training)

        # Since keep_prob is 1, dropout should not affect the result
        self.assertAllClose(self.test_inputs, output)
Example #5
    def test_non_scaling_dropout_inference(self):
        training = False
        keep_prob = 0.75
        input_shape = self.test_inputs.shape
        noise_shape = [input_shape[0], 1, input_shape[2]]

        # Keras implementation
        layer = non_scaling_dropout.NonScalingDropout(rate=(1 - keep_prob),
                                                      noise_shape=noise_shape,
                                                      seed=self.seed)
        output = layer(self.test_inputs, training=training)

        # When applied during inference, dropout should leave the input unchanged
        self.assertAllClose(self.test_inputs, output)
Example #6
    def test_non_scaling_dropout_noise_broadcasting(self):
        training = True
        keep_prob = 0.5
        input_shape = [3, 5]

        # Set all the inputs to 1 so we can monitor the output
        inputs = np.ones(input_shape, dtype="float32")
        layer = non_scaling_dropout.NonScalingDropout(
            rate=(1 - keep_prob),
            noise_shape=[1, input_shape[1]],
            seed=self.seed)
        output = self.sess.run(layer(inputs, training=training))

        self.assertAllInSet(output, [0, 1])

        # Check that the mask row is broadcast across all rows
        row = output[0]
        for i in range(output.shape[0]):
            self.assertAllEqual(row, output[i])
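
The broadcasting this test relies on is ordinary TensorFlow broadcasting: a [1, 5] mask multiplied against a [3, 5] input reuses the same mask row for every input row. For instance (mask values hypothetical):

import tensorflow as tf

inputs = tf.ones([3, 5])
mask = tf.constant([[1., 0., 1., 1., 0.]])  # shape [1, 5]
print((inputs * mask).numpy())
# Every row shares the same dropout pattern:
# [[1. 0. 1. 1. 0.]
#  [1. 0. 1. 1. 0.]
#  [1. 0. 1. 1. 0.]]
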
Example #7
    def test_non_scaling_dropout_training(self):
        training = True
        keep_prob = 0.75
        input_shape = self.test_inputs.shape
        noise_shape = [input_shape[0], 1, input_shape[2]]

        # Keras implementation
        layer = non_scaling_dropout.NonScalingDropout(rate=(1 - keep_prob),
                                                      noise_shape=noise_shape,
                                                      seed=self.seed)
        actual_output = layer(self.test_inputs, training=training)

        # TF implementation
        expected_output = _original_non_scaling_dropout(
            self.test_inputs,
            keep_prob=keep_prob,
            noise_shape=noise_shape,
            seed=self.seed)
        # Test that the layer matches the original implementation in the base case
        self.assertAllClose(expected_output, actual_output)
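
The _original_non_scaling_dropout reference helper is defined elsewhere in the test module. Its expected behavior, sketched under the same assumptions as the layer sketch after Example #2 (a Bernoulli mask applied without rescaling), would be roughly:

import tensorflow as tf

def original_non_scaling_dropout_sketch(x, keep_prob, noise_shape=None,
                                        seed=None):
  # Drop each unit with probability 1 - keep_prob, leaving kept units
  # unscaled (tf.nn.dropout would divide the kept units by keep_prob).
  if noise_shape is None:
    noise_shape = tf.shape(x)
  mask = tf.floor(keep_prob + tf.random.uniform(
      noise_shape, seed=seed, dtype=x.dtype))
  return x * mask

For assertAllClose to pass, both implementations must draw an identical mask, which is why the test pins the same seed and noise_shape on both sides.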