    def test_streaming_inference_external_state(self):

        output_non_stream_np, model_tf = self._run_non_stream_model()

        mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
        input_tensors = [
            tf.keras.layers.Input(shape=(
                1,
                self.input_data.shape[2],
            ),
                                  batch_size=self.batch_size,
                                  name="inp1")
        ]

        # convert non streaming trainable model to a streaming one
        model_stream = utils.convert_to_inference_model(
            model_tf, input_tensors, mode)
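        # initialize the external state of the streaming model with zeros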
        input_states_np = np.zeros(
            [self.batch_size, self.memory_size, self.weights[1].shape[-1]])

        # streaming emulation: loop over every element in time
        for i in range(self.input_data.shape[1]):
            input_batch_np = self.input_data[:, i, :]
            input_batch_np = np.expand_dims(input_batch_np, 1)
            output_np, output_states_np = model_stream.predict(
                [input_batch_np, input_states_np])
            input_states_np = output_states_np
            for b in range(self.input_data.shape[0]):  # loop over batch
                self.assertAllClose(output_np[b][0],
                                    output_non_stream_np[b][i])
Example No. 2
    def test_tf_non_streaming_vs_streaming_inference_external_state(self):
        """Tests non stream inference vs stream inference with external state."""
        speech_params = speech_features.SpeechFeatures.get_params(self.params)
        mode = modes.Modes.NON_STREAM_INFERENCE
        # TF non streaming frame extraction based on tf.signal.frame
        mel_speech_tf = speech_features.SpeechFeatures(
            speech_params, mode, self.inference_batch_size)
        # the non-streaming model receives the whole signal of size data_size
        input1 = tf.keras.layers.Input(shape=(self.data_size, ),
                                       batch_size=self.inference_batch_size,
                                       dtype=tf.float32)
        output1 = mel_speech_tf(input1)
        model_tf = tf.keras.models.Model(input1, output1)

        # generate frames for the whole signal (no streaming here)
        output_tf = model_tf.predict(self.signal)

        # input data for streaming mode
        input_tensors = [
            tf.keras.layers.Input(shape=(self.frame_step, ),
                                  batch_size=self.inference_batch_size,
                                  dtype=tf.float32)
        ]

        # convert non streaming trainable model to
        # streaming inference with external state
        mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
        model_stream = utils.convert_to_inference_model(
            model_tf, input_tensors, mode)

        # initialize state of streaming model
        pre_state = self.signal[:, 0:self.frame_size - self.frame_step]
        state2 = np.concatenate((np.zeros(shape=(1, self.frame_step),
                                          dtype=np.float32), pre_state),
                                axis=1)

        # run streaming frames extraction
        start = self.frame_size - self.frame_step
        end = self.frame_size
        streamed_frames = []
        while end <= self.data_size:
            # next data update
            stream_update = self.signal[:, start:end]

            # get new frame from stream of data
            output_frame, output_state = model_stream.predict(
                [stream_update, state2])
            state2 = output_state
            streamed_frames.append(output_frame)

            # update indexes of streamed updates
            start = end
            end = start + self.frame_step

        # compare streaming vs non streaming frames extraction
        for i in range(len(streamed_frames)):
            self.assertAllClose(streamed_frames[i][0][0],
                                output_tf[0][i],
                                rtol=1e-4,
                                atol=1e-4)
Example No. 3
    def test_streaming_inference_external_state(self):
        # test on input with [batch, time, feature1]
        self.init(shape=(8, 2))
        # convert non streamable trainable model to streamable with external state
        mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
        input_tensors = [
            tf.keras.layers.Input(shape=(
                1,
                2,
            ),
                                  batch_size=self.batch_size,
                                  name="inp1")
        ]
        model_stream = utils.convert_to_inference_model(
            self.model_train, input_tensors, mode)

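        # the second input of the streaming model is its external state, so reuse its shape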
        input_state = np.zeros(model_stream.inputs[1].shape, dtype=np.float32)

        # run streaming inference
        for i in range(self.inputs.shape[1]):
            input_stream = np.expand_dims(self.inputs[0][i], 0)
            input_stream = np.expand_dims(input_stream, 1)
            output_stream, output_state = model_stream.predict(
                [input_stream, input_state])
            input_state = output_state  # update input state
        self.assertAllEqual(output_stream, self.outputs)
Example No. 4
 def test_inference_internal_state(self):
   mode = modes.Modes.STREAM_INTERNAL_STATE_INFERENCE
   input_tensors = [
       tf.keras.layers.Input(
           shape=(
               1,
               self.feature_size,
           ),
           batch_size=1,
           name='inp_stream')
   ]
   # convert non streaming model to streaming one
   model_stream = utils.convert_to_inference_model(self.model,
                                                   input_tensors, mode)
   model_stream.summary()
   # confirm that it returns zeros
   for i in range(self.max_counter):
     i = i % self.input_non_stream_np.shape[1]
     input_stream_np = self.input_non_stream_np[:, i, :]
     input_stream_np = np.expand_dims(input_stream_np, 1)
     self.assertAllEqual(
         np.zeros_like(input_stream_np), model_stream.predict(input_stream_np))
   # confirm that after self.max_counter iterations it returns input
   for _ in range(self.max_counter):
     self.assertAllClose(
         input_stream_np, model_stream.predict(input_stream_np))
Example No. 5
    def test_inference_external_state(self, max_counter):
        inputs = tf.keras.layers.Input(shape=(
            self.time_size,
            self.feature_size,
        ),
                                       batch_size=1)
        net = counter.Counter(max_counter=max_counter)(inputs)
        model = tf.keras.Model(inputs, net)

        mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
        input_tensors = [
            tf.keras.layers.Input(shape=(
                1,
                self.feature_size,
            ),
                                  batch_size=1,
                                  name="inp_stream")
        ]
        # convert non streaming model to streaming one
        model_stream = utils.convert_to_inference_model(
            model, input_tensors, mode)
        model_stream.summary()

        # second input to the streaming model is its state, so we can use its shape
        input_state_np = np.zeros(model_stream.inputs[1].shape,
                                  dtype=np.float32)

        # confirm that it returns zeros in the first max_counter iterations
        for i in range(max_counter):
            i = i % self.input_non_stream_np.shape[1]
            input_stream_np = self.input_non_stream_np[:, i, :]
            input_stream_np = np.expand_dims(input_stream_np, 1)
            output_stream_np, output_state_np = model_stream.predict(
                [input_stream_np, input_state_np])
            input_state_np = output_state_np  # update input state
            self.assertAllEqual(np.zeros_like(input_stream_np),
                                output_stream_np)

        # confirm that after max_counter iterations it returns the input tensor
        for i in range(max_counter):
            i = i % self.input_non_stream_np.shape[1]
            input_stream_np = self.input_non_stream_np[:, i, :]
            input_stream_np = np.expand_dims(input_stream_np, 1)
            output_stream_np, output_state_np = model_stream.predict(
                [input_stream_np, input_state_np])
            input_state_np = output_state_np  # update input state
            self.assertAllClose(input_stream_np, output_stream_np)
Example No. 6
    def test_streaming_external_state(self):
        # Streaming convolution with external state - used for inference.
        # Create a non-streaming model first
        mode = modes.Modes.TRAINING
        layer = self._get_conv2d_layer(mode)
        inputs = tf.keras.layers.Input(shape=(self.time_dim, self.feature_dim,
                                              1),
                                       batch_size=self.batch_size)
        outputs = layer(inputs)
        model = tf.keras.Model(inputs, outputs)

        # Swap to streaming mode
        mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
        input_tensors = [
            tf.keras.Input(shape=(
                1,
                self.feature_dim,
                1,
            ),
                           batch_size=self.batch_size,
                           name='inp1')
        ]

        # Initialize the first state with zeros.
        input_states = np.zeros(
            [self.batch_size, self.kernel_size[0], self.feature_dim, 1])

        # Use the pipeline to convert the model into streaming external state
        model_stream = utils.convert_to_inference_model(
            model, input_tensors, mode)

        # Simulating streaming using a for loop
        for i in range(self.time_dim):
            input_feature = self.inputs[:, i, :, :]
            input_feature = np.expand_dims(input_feature, 1)
            output_np, output_states = model_stream.predict(
                [input_feature, input_states])

            # Propagate the output state as the input state of the next iteration.
            input_states = output_states
            for b in range(self.batch_size):  # loop over batch
                self.assertAllClose(output_np[b][0],
                                    self.expected_base_output[b][i])
Example No. 7
    def test_tf_non_streaming_vs_streaming_external_state(self):
        mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
        input_tensors = [
            tf.keras.layers.Input(shape=(self.frame_step, ),
                                  batch_size=1,
                                  name="inp1")
        ]

        # convert non streaming trainable model to a streaming one
        model_stream = utils.convert_to_inference_model(
            self.model_tf, input_tensors, mode)

        # initialize input state of streaming data framer
        pre_state = self.signal[:, 0:self.frame_size - self.frame_step]
        states = np.concatenate((np.zeros(shape=(1, self.frame_step),
                                          dtype=np.float32), pre_state),
                                axis=1)

        start = self.frame_size - self.frame_step
        end = self.frame_size
        streamed_frames = []

        # run streaming frames extraction
        while end <= self.data_size:
            # next data update
            stream_update = self.signal[:, start:end]

            # get new frame from stream of data
            output_frame, new_states = model_stream.predict(
                [stream_update, states])
            # update frame states and feed it as input in the next iteration
            states = new_states

            streamed_frames.append(output_frame)

            start = end
            end = start + self.frame_step

        # compare streaming vs non streaming frames extraction
        for i in range(0, len(self.output_frames_tf[0])):
            self.assertAllEqual(streamed_frames[i][0][0],
                                self.output_frames_tf[0][i])
Example No. 8
    def test_streaming_inference_external_state(self):

        with tf1.Session() as sess:
            output_non_stream_np, model_tf = self._run_non_stream_model()

            # input data for streaming stateless model
            input_tensors = [
                tf.keras.layers.Input(shape=(
                    1,
                    self.input_data.shape[2],
                ),
                                      batch_size=self.batch_size,
                                      dtype=tf.float32)
            ]

            # convert non streaming model to streaming one with external state
            mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
            model_stream = utils.convert_to_inference_model(
                model_tf, input_tensors, mode)

            # validate that the model is convertible to TFLite
            converter = tf1.lite.TFLiteConverter.from_session(
                sess, model_stream.inputs, model_stream.outputs)
            self.assertTrue(converter.convert())

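            # allocate zero-filled placeholders for the streaming input and its external states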
            inputs = []
            for s in range(len(model_stream.inputs)):
                inputs.append(
                    np.zeros(model_stream.inputs[s].shape, dtype=np.float32))

            # streaming emulation: loop over every element in time
            for i in range(self.input_data.shape[1]):
                input_batch_np = self.input_data[:, i, :]
                input_batch_np = np.expand_dims(input_batch_np, 1)
                inputs[0] = input_batch_np
                outputs = model_stream.predict(inputs)
                # feed the output states back as input states for the next iteration
                for s in range(1, len(model_stream.inputs)):
                    inputs[s] = outputs[s]
                for b in range(self.input_data.shape[0]):  # loop over batch
                    self.assertAllClose(outputs[0][b][0],
                                        output_non_stream_np[b][i])
Example No. 9
 def test_streaming_inference_internal_state(self):
   # test on input with [batch, time, feature1, feature2]
   self.init(shape=(8, 2, 1))
   # convert non streamable trainable model to streamable with internal state
   mode = modes.Modes.STREAM_INTERNAL_STATE_INFERENCE
   input_tensors = [
       tf.keras.layers.Input(
           shape=(
               1,
               2,
               1,
           ), batch_size=self.batch_size, name="inp1")
   ]
   model_stream = utils.convert_to_inference_model(self.model_train,
                                                   input_tensors, mode)
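   # with internal state, the streaming model keeps its state inside, so predict() takes only the input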
   # run streaming inference
   for i in range(self.inputs.shape[1]):
     input_stream = np.expand_dims(self.inputs[0][i], 0)
     input_stream = np.expand_dims(input_stream, 1)
     output_stream = model_stream.predict(input_stream)
   self.assertAllEqual(output_stream, self.outputs)
Example No. 10
    def test_streaming_with_effective_tdim(self):
        time_size = 10
        feature_size = 3
        batch_size = 1

        time_dim = 1  # index of time dimensions
        ring_buffer_size_in_time_dim = 3  # effective size of aperture in time dim

        inputs = tf.keras.layers.Input(shape=(time_size, feature_size),
                                       batch_size=batch_size,
                                       name='inp_sequence')

        mode = modes.Modes.TRAINING

        # in streaming mode it will create a
        # ring buffer with time dim size ring_buffer_size_in_time_dim
        outputs = stream.Stream(
            cell=Sum(time_dim=time_dim),
            mode=mode,
            ring_buffer_size_in_time_dim=ring_buffer_size_in_time_dim)(inputs)
        model_train = tf.keras.Model(inputs, outputs)
        model_train.summary()

        mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
        input_tensors = [
            tf.keras.layers.Input(
                shape=(
                    1,  # time dim is size 1 in streaming mode
                    feature_size,
                ),
                batch_size=batch_size,
                name='inp_stream')
        ]
        # convert non streaming model to streaming one
        model_stream = utils.convert_to_inference_model(
            model_train, input_tensors, mode)
        model_stream.summary()

        # second input to the streaming model is its state, so we can use its shape
        input_state_np = np.zeros(model_stream.inputs[1].shape,
                                  dtype=np.float32)

        # input test data
        non_stream_input = np.random.randint(1,
                                             10,
                                             size=(batch_size, time_size,
                                                   feature_size))

        # run streaming inference
        # iterate over time dim sample by sample
        for i in range(input_state_np.shape[1]):
            input_stream_np = np.expand_dims(non_stream_input[0][i], 0)
            input_stream_np = np.expand_dims(input_stream_np, 1)
            input_stream_np = input_stream_np.astype(np.float32)
            output_stream_np, output_state_np = model_stream.predict(
                [input_stream_np, input_state_np])
            input_state_np = output_state_np  # update input state

            # emulate sliding window summation
            target = np.sum(
                non_stream_input[:, max(0, i - ring_buffer_size_in_time_dim):i + 1],
                axis=time_dim)
            self.assertAllEqual(target, output_stream_np)

        # validate name tag of model's state
        expected_str = 'ExternalState'
        self.assertAllEqual(
            expected_str,
            model_stream.inputs[1].name.split('/')[-1][:len(expected_str)])
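
All of the snippets above follow the same basic pattern: build a trainable Keras model, convert it with utils.convert_to_inference_model, then feed it one time step at a time, passing the external state returned by each call back in as the next call's state input. Below is a minimal sketch of that pattern, assuming the kws_streaming import paths shown in the comments; run_streaming_external_state, model_train, time_size and feature_size are illustrative names, not taken from any single example above.

    # Minimal sketch of the common streaming pattern above (assumed import
    # paths; adjust them to your copy of the kws_streaming project).
    import numpy as np
    import tensorflow as tf
    from kws_streaming.layers import modes
    from kws_streaming.models import utils


    def run_streaming_external_state(model_train, time_size, feature_size,
                                     batch_size=1):
        """Converts model_train to streaming mode and runs it step by step."""
        mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
        input_tensors = [
            tf.keras.layers.Input(shape=(1, feature_size),
                                  batch_size=batch_size,
                                  name='inp_stream')
        ]
        model_stream = utils.convert_to_inference_model(
            model_train, input_tensors, mode)

        # the second input of the converted model is its external state
        state = np.zeros(model_stream.inputs[1].shape, dtype=np.float32)

        # random test signal with shape [batch, time, feature]
        signal = np.random.rand(batch_size, time_size,
                                feature_size).astype(np.float32)

        outputs = []
        for i in range(time_size):
            frame = signal[:, i:i + 1, :]  # keep the time dim, size 1
            output, state = model_stream.predict([frame, state])
            outputs.append(output)
        return outputs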