Example 1
0
  def _streaming_internal_state(self, inputs):
    """Runs the transposed convolution in streaming mode with internal state.

    Applies the parent Conv1DTranspose to `inputs`, adds the overlap
    remainder carried over from the previous call (stored in `self.states`),
    saves the new trailing overlap back into `self.states`, and returns the
    (optionally cropped) output.

    Args:
      inputs: input tensor for one streaming step; assumed shape is
        [batch, time, features] — TODO confirm with the layer's build().

    Returns:
      Output tensor, cropped to `self.output_time_dim` along the time axis
      when `self.crop_output` is set.
    """
    outputs = super(Conv1DTranspose, self).call(inputs)

    # No overlap between consecutive steps: nothing to carry over, so the
    # state variable is untouched and we can return immediately.
    if self.overlap == 0:
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      else:
        return tf.identity(outputs)

    output_shape = outputs.shape.as_list()

    # need to add remainder state to a specific region of output as below:
    # outputs[:,0:self.overlap,:] = outputs[:,0:self.overlap,:] + self.states
    # but 'Tensor' object does not support item assignment,
    # so doing it through full summation below
    output_shape[1] -= self.state_shape[1]
    padded_remainder = tf.concat(
        [self.states, tf.zeros(output_shape, tf.float32)], 1)
    outputs = outputs + padded_remainder

    # extract remainder state and substruct bias if it is used:
    # bias will be added in the next iteration again and remainder
    # should have only convolution part, so that bias is not added twice
    if self.use_bias:
      new_state = outputs[:, -self.overlap:, :] - self.bias
    else:
      new_state = outputs[:, -self.overlap:, :]
    assign_states = self.states.assign(new_state)

    # The control dependency forces the state assignment to execute before
    # the output is produced; tf.identity anchors the dependency.
    with tf.control_dependencies([assign_states]):
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      else:
        return tf.identity(outputs)
Example 2
0
  def _streaming_internal_state(self, inputs):
    """Runs the transposed convolution in streaming mode with internal state.

    Applies the parent Conv1DTranspose to `inputs`, adds the overlap
    remainder from the previous call (stored in `self.states`), stores the
    new trailing overlap back into `self.states`, and returns the
    (optionally cropped) output.

    Args:
      inputs: input tensor for one streaming step; assumed shape is
        [batch, time, features] — TODO confirm with the layer's build().

    Returns:
      Output tensor, cropped to `self.output_time_dim` along the time axis
      when `self.crop_output` is set.
    """
    outputs = super(Conv1DTranspose, self).call(inputs)

    # No overlap between steps: state carries nothing, return immediately.
    if self.overlap == 0:
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      else:
        return tf.identity(outputs)

    output_shape = outputs.shape.as_list()

    # need to add remainder state to a specific region of output as below:
    # outputs[:,0:self.overlap,:] = outputs[:,0:self.overlap,:] + self.states
    # but 'Tensor' object does not support item assignment,
    # so doing it through full summation below
    output_shape[1] -= self.state_shape[1]
    padded_remainder = tf.concat(
        [self.states, tf.zeros(output_shape, tf.float32)], 1)
    outputs = outputs + padded_remainder

    # NOTE(review): unlike the bias-aware variant of this method, the stored
    # overlap here includes any bias the layer added; if the layer is built
    # with use_bias=True the bias would be added twice on the next step —
    # confirm this layer is configured with use_bias=False.
    new_state = outputs[:, -self.overlap:, :]
    assign_states = self.states.assign(new_state)

    # Control dependency guarantees the state update runs before returning.
    with tf.control_dependencies([assign_states]):
      if self.crop_output:
        return tf.identity(outputs[:, 0:self.output_time_dim, :])
      else:
        return tf.identity(outputs)
Example 3
0
 def _streaming_internal_state(self, inputs):
     """Streams one step, persisting the updated state internally.

     Delegates the actual computation to the stateless external-state
     implementation, then writes the returned state back into the
     internal `self.states` variable before emitting the frames.
     """
     frames, updated_state = self._streaming_external_state(
         inputs, self.states)
     state_write = self.states.assign(updated_state)
     with tf.control_dependencies([state_write]):
         # tf.identity forces the state write to execute before returning.
         return tf.identity(frames)
Example 4
0
    def _streaming_internal_state(self, inputs):
        """Delay line: emits the oldest buffered frames, stores the newest.

        Concatenates the stored state with the incoming frames along the
        time axis, returns a window of the same length as `inputs` taken
        from the front, and keeps the trailing `self.delay` frames as the
        new state.
        """
        time_steps = inputs.shape.as_list()[1]
        # Old state first, fresh input last, joined on the time axis.
        buffered = tf.keras.backend.concatenate([self.states, inputs], 1)
        emitted = buffered[:, 0:time_steps, :]
        state_write = self.states.assign(buffered[:, -self.delay:, :])

        with tf.control_dependencies([state_write]):
            # tf.identity anchors the dependency so the assign runs first.
            return tf.identity(emitted)
Example 5
0
  def _streaming_internal_state(self, inputs):
    """Runs one streaming step of the wrapped cell with internal state.

    Dispatches on the wrapped core layer:
      * Conv2DTranspose: adds the carried-over overlap remainder to the
        cell output, saves the new trailing overlap (minus bias, if used)
        into `self.states`, and optionally crops the output in time.
      * Anything else: maintains a ring buffer of past inputs in
        `self.states` and feeds the buffered window to the cell.

    Args:
      inputs: input tensor for one streaming step; assumed layout is
        [batch, time, ...] — TODO confirm against the wrapper's build().

    Returns:
      The cell output for this step, with state side effects applied.

    Raises:
      ValueError: in one-step mode when the input time dimension is not 1.
    """
    if isinstance(self.get_core_layer(), tf.keras.layers.Conv2DTranspose):
      outputs = self.cell(inputs)

      # No overlap between steps: nothing to carry, return (cropped) output.
      if self.ring_buffer_size_in_time_dim == 0:
        if self.transposed_conv_crop_output:
          outputs = outputs[:, 0:self.output_time_dim]
        return outputs

      output_shape = outputs.shape.as_list()

      # need to add remainder state to a specific region of output as below:
      # outputs[:,0:self.ring_buffer_size_in_time_dim,:] =
      # outputs[:,0:self.ring_buffer_size_in_time_dim,:] + self.states
      # but 'Tensor' object does not support item assignment,
      # so doing it through full summation below
      output_shape[1] -= self.state_shape[1]
      padded_remainder = tf.concat(
          [self.states, tf.zeros(output_shape, tf.float32)], 1)
      outputs = outputs + padded_remainder

      # extract remainder state and subtract bias if it is used:
      # bias will be added in the next iteration again and remainder
      # should have only convolution part, so that bias is not added twice
      if self.get_core_layer().get_config()['use_bias']:
        # need to access bias of the cell layer,
        # where cell can be wrapped by wrapper layer
        bias = self.get_core_layer().bias
        new_state = outputs[:, -self.ring_buffer_size_in_time_dim:, :] - bias  # pylint: disable=invalid-unary-operand-type
      else:
        new_state = outputs[:, -self.ring_buffer_size_in_time_dim:, :]  # pylint: disable=invalid-unary-operand-type
      assign_states = self.states.assign(new_state)

      # Control dependency ensures the state write happens before returning.
      with tf.control_dependencies([assign_states]):
        if self.transposed_conv_crop_output:
          return tf.identity(outputs[:, 0:self.output_time_dim, :])
        else:
          return tf.identity(outputs)
    else:
      if self.use_one_step:
        # The time dimenstion always has to equal 1 in streaming mode.
        if inputs.shape[1] != 1:
          raise ValueError('inputs.shape[1]: %d must be 1 ' % inputs.shape[1])

        # remove latest row [batch_size, (memory_size-1), feature_dim, channel]
        memory = self.states[:, 1:self.ring_buffer_size_in_time_dim, :]

        # add new row [batch_size, memory_size, feature_dim, channel]
        memory = tf.keras.backend.concatenate([memory, inputs], 1)

        assign_states = self.states.assign(memory)

        # State must be updated before the cell consumes the new window.
        with tf.control_dependencies([assign_states]):
          return self.cell(memory)
      else:
        # add new row [batch_size, memory_size, feature_dim, channel]
        if self.ring_buffer_size_in_time_dim:
          memory = tf.keras.backend.concatenate([self.states, inputs], 1)

          # Keep only the most recent ring_buffer_size_in_time_dim frames.
          state_update = memory[:, -self.ring_buffer_size_in_time_dim:, :]  # pylint: disable=invalid-unary-operand-type

          assign_states = self.states.assign(state_update)

          with tf.control_dependencies([assign_states]):
            return self.cell(memory)
        else:
          # Stateless cell: no buffering needed, pass input straight through.
          return self.cell(inputs)