    def _run_non_stream_model(self):
        # The model below expects input_data to be initialized in tu.TestBase.
        # In setUp, input_data has 3 dimensions by default; the size of each
        # dimension is constant and is defined by self.weights.
        mode = Modes.TRAINING
        input_tf = tf.keras.layers.Input(shape=(
            None,
            self.input_data.shape[2],
        ))

        dense1 = tf.keras.layers.Dense(units=self.weights[0].shape[1],
                                       use_bias=False)
        depthwisecnn1 = depthwiseconv1d.DepthwiseConv1D(
            memory_size=self.memory_size,
            inference_batch_size=self.batch_size,
            mode=mode)

        output_dense1_tf = dense1(input_tf)
        output_tf = depthwisecnn1(inputs=output_dense1_tf)
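        # Assign the test weights only after the layers above have been
        # built by calling them on the input tensors.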
        dense1.set_weights([self.weights[0]])
        depthwisecnn1.set_weights([self.weights[1], self.weights[2]])
        model_tf = tf.keras.models.Model(input_tf, output_tf)

        output_np = model_tf.predict(self.input_data)

        return output_np, model_tf

    def test_streaming_inference_internal_state(self):
        output_non_stream_np, _ = self._run_non_stream_model()

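        # In internal-state streaming mode the conv layer keeps its memory
        # buffer as an internal variable, so the model consumes one frame
        # (time step) per call.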
        mode = Modes.STREAM_INTERNAL_STATE_INFERENCE
        input_tf = tf.keras.layers.Input(shape=(
            1,
            self.input_data.shape[2],
        ),
                                         batch_size=self.batch_size)
        dense1 = tf.keras.layers.Dense(units=self.weights[0].shape[1],
                                       use_bias=False)
        depthwisecnn1 = depthwiseconv1d.DepthwiseConv1D(
            memory_size=self.memory_size,
            inference_batch_size=self.batch_size,
            mode=mode)

        output_dense1_tf = dense1(input_tf)
        output_tf = depthwisecnn1(inputs=output_dense1_tf)
        model = tf.keras.Model(input_tf, output_tf)

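        # The streaming buffer starts as zeros and is handed to the layer as
        # an additional weight next to its trainable parameters.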
        input_states_np = np.zeros(
            [self.batch_size, self.memory_size, self.weights[1].shape[-1]])
        dense1.set_weights([self.weights[0]])
        depthwisecnn1.set_weights(
            [self.weights[1], self.weights[2], input_states_np])

        # streaming emulation: loop over every element in time
        for i in range(self.input_data.shape[1]):
            input_batch_np = self.input_data[:, i, :]
            input_batch_np = np.expand_dims(input_batch_np, 1)
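            # input_batch_np has shape (batch_size, 1, feature_dim): a single
            # frame per prediction step.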
            output_np = model.predict(input_batch_np)
            for b in range(self.input_data.shape[0]):  # loop over batch
                self.assertAllClose(output_np[b][0],
                                    output_non_stream_np[b][i])
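
The test above relies on the kws_streaming layers. The same equivalence can be
reproduced with a self-contained sketch: a causal depthwise convolution applied
to the whole sequence must match frame-by-frame application with a sliding
state buffer. All names below are illustrative and independent of the library
under test.

import numpy as np

memory_size, channels, time_steps = 3, 2, 8
kernel = np.random.rand(memory_size, channels).astype(np.float32)
x = np.random.rand(1, time_steps, channels).astype(np.float32)

# Non-streaming: left-pad (causal) and convolve over the full sequence.
padded = np.pad(x, ((0, 0), (memory_size - 1, 0), (0, 0)))
non_stream = np.stack(
    [(padded[:, t:t + memory_size, :] * kernel).sum(axis=1)
     for t in range(time_steps)], axis=1)

# Streaming: keep the last memory_size frames as state, one frame per step.
state = np.zeros((1, memory_size, channels), dtype=np.float32)
for t in range(time_steps):
    state = np.concatenate([state[:, 1:, :], x[:, t:t + 1, :]], axis=1)
    stream_out = (state * kernel).sum(axis=1)
    np.testing.assert_allclose(stream_out, non_stream[:, t, :], rtol=1e-5)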
Example #3
  def build(self, input_shape):
    super(Svdf, self).build(input_shape)

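    # SVDF topology: a per-timestep feature projection (dense1) followed by a
    # per-channel convolution over time (depth_cnn1), with optional dropout,
    # a second dense layer and batch normalization.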
    if self.mode == modes.Modes.TRAINING:
      self.dropout1 = non_scaling_dropout.NonScalingDropout(
          self.dropout, training=True)
    else:
      self.dropout1 = tf.keras.layers.Lambda(lambda x: x)
    self.dense1 = tf.keras.layers.Dense(
        units=self.units1, use_bias=self.use_bias1)
    self.depth_cnn1 = depthwiseconv1d.DepthwiseConv1D(
        memory_size=self.memory_size,
        inference_batch_size=self.inference_batch_size,
        use_bias=self.use_bias,
        mode=self.mode,
        pad=self.pad)
    if self.units2 > 0:
      self.dense2 = tf.keras.layers.Dense(units=self.units2, use_bias=True)
    else:
      self.dense2 = tf.keras.layers.Lambda(lambda x: x)

    if self.use_batch_norm:
      self.batch_norm = tf.keras.layers.BatchNormalization(scale=self.bn_scale)
    else:
      self.batch_norm = tf.keras.layers.Lambda(lambda x: x)
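
A hedged usage sketch: the constructor arguments below are inferred from the
attributes read in build() above and may not match the actual Svdf signature.

# units1, memory_size, units2, dropout, use_batch_norm and mode are assumed
# constructor arguments, based on the self.* attributes used in build().
svdf = Svdf(
    units1=128,        # width of the dense1 projection
    memory_size=4,     # temporal receptive field of depth_cnn1
    units2=64,         # 0 would replace dense2 with an identity Lambda
    dropout=0.1,       # active only when mode == modes.Modes.TRAINING
    use_batch_norm=True,
    mode=modes.Modes.TRAINING)
net = svdf(tf.keras.layers.Input(shape=(None, 40)))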