def _streaming_internal_state(self, inputs):
    """Run one streaming step, updating the internal ring-buffer state.

    Appends the new input frame(s) to the stored state along the time
    axis, writes the updated buffer back into `self.states`, and applies
    `self.cell` to the combined memory.

    Args:
      inputs: input tensor; in one-step mode its time axis must be 1.

    Returns:
      Output of `self.cell` on the buffered input.

    Raises:
      ValueError: in one-step mode, if inputs.shape[1] != 1.
    """
    if self.use_one_step:
        # One-step mode: exactly one new time frame per call.
        if inputs.shape[1] != 1:
            raise ValueError('inputs.shape[1]: %d must be 1 ' %
                             inputs.shape[1])

        # Drop the oldest frame and append the incoming one, keeping a
        # constant buffer length along the time dimension.
        ring_buffer = self.states[:, 1:self.ring_buffer_size_in_time_dim, :]
        ring_buffer = tf.keras.backend.concatenate([ring_buffer, inputs], 1)

        update_op = self.states.assign(ring_buffer)
        with tf.control_dependencies([update_op]):
            return self.cell(ring_buffer)

    # Multi-step mode without a ring buffer: stateless pass-through.
    if not self.ring_buffer_size_in_time_dim:
        return self.cell(inputs)

    # Multi-step mode: append all new frames; only the most recent
    # ring_buffer_size_in_time_dim frames become the next state.
    combined = tf.keras.backend.concatenate([self.states, inputs], 1)
    next_state = combined[:, -self.ring_buffer_size_in_time_dim:, :]  # pylint: disable=invalid-unary-operand-type
    update_op = self.states.assign(next_state)
    with tf.control_dependencies([update_op]):
        return self.cell(combined)
  def _streaming_internal_state(self, inputs):
    """Streaming transposed-conv step using overlap-add internal state.

    Runs the parent Conv1DTranspose, adds the remainder stored from the
    previous call onto the head of the output, and saves the output tail
    (minus bias, if any) as the remainder for the next call.
    """
    outputs = super(Conv1DTranspose, self).call(inputs)

    # Without overlap there is no state to carry between calls.
    if self.overlap == 0:
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      return tf.identity(outputs)

    out_shape = outputs.shape.as_list()

    # 'Tensor' does not support item assignment, so instead of
    # outputs[:, 0:self.overlap, :] += self.states we zero-pad the stored
    # remainder to the full output length and add it in one summation.
    out_shape[1] -= self.state_shape[1]
    outputs = outputs + tf.concat(
        [self.states, tf.zeros(out_shape, tf.float32)], 1)

    # The tail of the summed output becomes the next remainder. Subtract
    # the bias when it is used: the bias is re-added on the next call, so
    # the remainder must carry only the convolution contribution.
    remainder = outputs[:, -self.overlap:, :]
    if self.use_bias:
      remainder = remainder - self.bias
    update_op = self.states.assign(remainder)

    with tf.control_dependencies([update_op]):
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      return tf.identity(outputs)
    def _streaming_internal_state(self, inputs):
        """Streaming depthwise 1D convolution with an internal state buffer.

        Used for streaming inference: each call consumes one time frame.
        """
        # Streaming mode feeds exactly one frame per call: [batch, 1, feature].
        if inputs.shape[1] != 1:
            raise ValueError('inputs.shape[1]: %d must be 1 ' %
                             inputs.shape[1])

        # Shift the buffer: drop the oldest frame and append the new one,
        # keeping shape [batch_size, memory_size, feature_dim].
        shifted = self.states[:, 1:self.memory_size, :]
        buffer = tf.keras.backend.concatenate([shifted, inputs], 1)
        update_op = self.states.assign(buffer)

        with tf.control_dependencies([update_op]):
            # Depthwise conv as elementwise multiply + reduction over time.
            weighted = buffer * self.time_kernel
            result = tf.keras.backend.sum(weighted, axis=1)

            if self.use_bias:
                result = result + self.bias

            # Restore the time dimension: [batch, 1, feature].
            return tf.keras.backend.expand_dims(result, -2)
# Beispiel #4 (Example 4)
# 0
    def _streaming_internal_state(self, inputs):
        """Streaming framing step: shift the sample buffer, emit one frame.

        Raises:
          ValueError: if the batch size or the number of incoming samples
            does not match the configured streaming shapes.
        """
        # Batch size is fixed at streaming-inference time.
        if inputs.shape[0] != self.inference_batch_size:
            raise ValueError(
                'inputs.shape[0]:%d must be = self.inference_batch_size:%d' %
                (inputs.shape[0], self.inference_batch_size))

        # Each call must deliver exactly frame_step new samples.
        if inputs.shape[1] != self.frame_step:
            raise ValueError(
                'inputs.shape[1]:%d must be = self.frame_step:%d' %
                (inputs.shape[1], self.frame_step))

        # Retain the newest (frame_size - frame_step) samples and append
        # the incoming ones -> [batch_size, frame_size].
        retained = self.states[:, self.frame_step:self.frame_size]
        frame = tf.keras.backend.concatenate([retained, inputs], 1)

        update_op = self.states.assign(frame)

        with tf.control_dependencies([update_op]):
            # Insert the time axis: [batch_size, 1, frame_size].
            return tf.keras.backend.expand_dims(frame, -2)
 def _streaming_internal_state(self, inputs):
     """Streaming step: reuse the external-state path, then persist state."""
     frames, updated_state = self._streaming_external_state(
         inputs, self.states)
     update_op = self.states.assign(updated_state)
     with tf.control_dependencies([update_op]):
         # tf.identity forces the state assignment to run before returning.
         return tf.identity(frames)
  def _streaming_internal_state(self, inputs):
    """Streaming transposed-conv step with overlap-add internal state.

    Runs the parent Conv1DTranspose on the new input, adds the remainder
    carried over from the previous call onto the head of the output, and
    stores the tail of the output as the remainder for the next call.

    Args:
      inputs: input tensor for one streaming step.

    Returns:
      The overlap-added output, cropped to `self.output_time_dim` when
      `self.crop_output` is set.
    """
    outputs = super(Conv1DTranspose, self).call(inputs)

    if self.overlap == 0:
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      else:
        return tf.identity(outputs)

    output_shape = outputs.shape.as_list()

    # need to add remainder state to a specific region of output as below:
    # outputs[:,0:self.overlap,:] = outputs[:,0:self.overlap,:] + self.states
    # but 'Tensor' object does not support item assignment,
    # so doing it through full summation below
    output_shape[1] -= self.state_shape[1]
    padded_remainder = tf.concat(
        [self.states, tf.zeros(output_shape, tf.float32)], 1)
    outputs = outputs + padded_remainder

    # Bug fix: subtract the bias from the stored remainder when bias is
    # used. The parent call adds the bias again on the next iteration, so
    # keeping it in the state would add the bias twice to the overlap
    # region (matches the sibling implementation of this method).
    if self.use_bias:
      new_state = outputs[:, -self.overlap:, :] - self.bias
    else:
      new_state = outputs[:, -self.overlap:, :]
    assign_states = self.states.assign(new_state)

    with tf.control_dependencies([assign_states]):
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      else:
        return tf.identity(outputs)
    def _streaming_internal_state(self, inputs):
        """Delay layer streaming step: emit delayed samples, store the tail."""
        # Prepend the stored (delayed) samples to the new input.
        joined = tf.keras.backend.concatenate([self.states, inputs], 1)
        # Output keeps the input's time length, taken from the front of the
        # joined buffer, i.e. delayed by self.delay frames.
        delayed = joined[:, 0:inputs.shape.as_list()[1], :]
        # The newest self.delay frames become the next state.
        update_op = self.states.assign(joined[:, -self.delay:, :])

        with tf.control_dependencies([update_op]):
            return tf.identity(delayed)
# Beispiel #8 (Example 8)
# 0
    def _streaming_internal_state(self, inputs):
        """Counter gate: pass inputs through only after max_counter steps."""
        # Advance the step counter, saturating at max_counter + 1.
        counter = tf.math.minimum(self.state + 1.0, self.max_counter + 1)

        update_op = self.state.assign(counter)

        with tf.control_dependencies([update_op]):
            # relu with a threshold yields 0.0 until the counter exceeds
            # max_counter, then clips to 1.0 - an on/off multiplier.
            gate = tf.keras.activations.relu(counter[0][0][0],
                                             max_value=1.0,
                                             threshold=self.max_counter)
            return tf.multiply(inputs, gate)
# Beispiel #9 (Example 9)
# 0
    def _streaming_internal_state(self, inputs):
        """Streaming framing with internal state; supports legacy one-step mode.

        Raises:
          ValueError: if the batch size (or, in one-step mode, the number of
            incoming samples) does not match the configured streaming shapes.
        """
        # Batch size is fixed at streaming-inference time.
        if inputs.shape[0] != self.inference_batch_size:
            raise ValueError(
                'inputs.shape[0]:%d must be = self.inference_batch_size:%d' %
                (inputs.shape[0], self.inference_batch_size))

        if self.use_one_step:
            # Legacy path kept for backward compatibility only: it assumes
            # frame_size and frame_step overlap. The tf.signal.frame path
            # below is more generic.
            if inputs.shape[1] != self.frame_step:
                raise ValueError(
                    'inputs.shape[1]:%d must be = self.frame_step:%d' %
                    (inputs.shape[1], self.frame_step))

            # Shift buffer: retain the newest (frame_size - frame_step)
            # samples and append the incoming frame_step samples.
            frame = tf.keras.backend.concatenate(
                [self.states[:, self.frame_step:self.frame_size], inputs], 1)

            update_op = self.states.assign(frame)

            with tf.control_dependencies([update_op]):
                # Insert the time axis: [batch_size, 1, frame_size].
                return tf.keras.backend.expand_dims(frame, -2)

        # Generic path: append all new samples, retain only the newest
        # ring_buffer_size_in_time_dim samples as the next state, and split
        # the combined buffer into (possibly overlapping) frames.
        buffer = tf.keras.backend.concatenate([self.states, inputs], 1)
        update_op = self.states.assign(
            buffer[:, -self.ring_buffer_size_in_time_dim:])

        with tf.control_dependencies([update_op]):
            return tf.signal.frame(buffer,
                                   frame_length=self.frame_size,
                                   frame_step=self.frame_step)
# Beispiel #10 (Example 10)
# 0
  def _streaming_internal_state(self, inputs):
    """One-step streaming conv: shift the buffer, update state, run the cell.

    Raises:
      ValueError: if inputs.shape[1] != 1.
    """
    # Streaming mode feeds exactly one time frame per call.
    if inputs.shape[1] != 1:
      raise ValueError('inputs.shape[1]: %d must be 1 ' % inputs.shape[1])

    # Drop the oldest frame and append the new one, keeping the buffer
    # length along time equal to effective_ksize_tdim.
    buffer = tf.keras.backend.concatenate(
        [self.states[:, 1:self.effective_ksize_tdim, :], inputs], 1)

    update_op = self.states.assign(buffer)

    with tf.control_dependencies([update_op]):
      return self.cell(buffer)
# Beispiel #11 (Example 11)
# 0
  def _streaming_internal_state(self, inputs):
    """Run one GRU step in streaming mode, persisting the hidden state.

    Raises:
      ValueError: if the batch size does not match inference_batch_size.
    """
    # Batch size is fixed at streaming-inference time.
    if inputs.shape[0] != self.inference_batch_size:
      raise ValueError(
          'inputs.shape[0]:%d must be = self.inference_batch_size:%d' %
          (inputs.shape[0], self.inference_batch_size))

    # The GRU cell expects [batch, feature]; drop the unit time axis from
    # the incoming [batch, 1, feature] tensor.
    squeezed = tf.keras.backend.squeeze(inputs, axis=1)
    output, new_states = self.gru_cell(squeezed, [self.input_state])

    # Persist the updated hidden state.
    update_op = self.input_state.assign(new_states[0])

    with tf.control_dependencies([update_op]):
      # Restore the time axis: [batch, 1, feature].
      return tf.keras.backend.expand_dims(output, axis=1)
# Beispiel #12 (Example 12)
# 0
  def _streaming_internal_state(self, inputs):
    """Streaming step with internal state for conv and transposed-conv cores.

    Two top-level paths:
      * Conv2DTranspose core: overlap-add of the carried remainder onto the
        head of the new output; the output tail (minus bias, if used)
        becomes the next remainder state.
      * Any other core: ring-buffer update (one-step or multi-step),
        followed by applying `self.cell` to the combined memory.

    Args:
      inputs: input tensor for one streaming step; in one-step mode its
        time dimension (axis 1) must be 1.

    Returns:
      Output tensor of `self.cell`; for the transposed-conv path it is
      cropped to `self.output_time_dim` when `transposed_conv_crop_output`
      is set.

    Raises:
      ValueError: in one-step mode, if inputs.shape[1] != 1.
    """
    if isinstance(self.get_core_layer(), tf.keras.layers.Conv2DTranspose):
      outputs = self.cell(inputs)

      # No overlap between successive outputs: nothing to carry over.
      if self.ring_buffer_size_in_time_dim == 0:
        if self.transposed_conv_crop_output:
          outputs = outputs[:, 0:self.output_time_dim]
        return outputs

      output_shape = outputs.shape.as_list()

      # need to add remainder state to a specific region of output as below:
      # outputs[:,0:self.ring_buffer_size_in_time_dim,:] =
      # outputs[:,0:self.ring_buffer_size_in_time_dim,:] + self.states
      # but 'Tensor' object does not support item assignment,
      # so doing it through full summation below
      output_shape[1] -= self.state_shape[1]
      padded_remainder = tf.concat(
          [self.states, tf.zeros(output_shape, tf.float32)], 1)
      outputs = outputs + padded_remainder

      # extract remainder state and subtract bias if it is used:
      # bias will be added in the next iteration again and remainder
      # should have only convolution part, so that bias is not added twice
      if self.get_core_layer().get_config()['use_bias']:
        # need to access bias of the cell layer,
        # where cell can be wrapped by wrapper layer
        bias = self.get_core_layer().bias
        new_state = outputs[:, -self.ring_buffer_size_in_time_dim:, :] - bias  # pylint: disable=invalid-unary-operand-type
      else:
        new_state = outputs[:, -self.ring_buffer_size_in_time_dim:, :]  # pylint: disable=invalid-unary-operand-type
      assign_states = self.states.assign(new_state)

      with tf.control_dependencies([assign_states]):
        if self.transposed_conv_crop_output:
          return tf.identity(outputs[:, 0:self.output_time_dim, :])
        else:
          return tf.identity(outputs)
    else:
      if self.use_one_step:
        # The time dimension always has to equal 1 in streaming mode.
        if inputs.shape[1] != 1:
          raise ValueError('inputs.shape[1]: %d must be 1 ' % inputs.shape[1])

        # remove latest row [batch_size, (memory_size-1), feature_dim, channel]
        memory = self.states[:, 1:self.ring_buffer_size_in_time_dim, :]

        # add new row [batch_size, memory_size, feature_dim, channel]
        memory = tf.keras.backend.concatenate([memory, inputs], 1)

        assign_states = self.states.assign(memory)

        with tf.control_dependencies([assign_states]):
          return self.cell(memory)
      else:
        # add new row [batch_size, memory_size, feature_dim, channel]
        if self.ring_buffer_size_in_time_dim:
          memory = tf.keras.backend.concatenate([self.states, inputs], 1)

          # keep only the newest ring_buffer_size_in_time_dim frames as state
          state_update = memory[:, -self.ring_buffer_size_in_time_dim:, :]  # pylint: disable=invalid-unary-operand-type

          assign_states = self.states.assign(state_update)

          with tf.control_dependencies([assign_states]):
            return self.cell(memory)
        else:
          # no ring buffer configured: stateless pass-through to the cell
          return self.cell(inputs)