Example 1
 def compute_mask(self, inputs, mask=None):
     # Cases that need layer.compute_mask even when input_mask is None:
     # a Masking layer, or an Embedding layer with mask_zero.
     input_shape = K.int_shape(inputs)
     if input_shape[0]:
         # batch size matters, we currently do not handle mask explicitly
         return mask
     inner_mask = mask
     if inner_mask is not None:
         inner_mask_shape = self._get_shape_tuple((-1, ), mask, 2)
         inner_mask = K.reshape(inner_mask, inner_mask_shape)
     inner_input_shape = self._get_shape_tuple((-1, ), inputs, 2)
     inner_inputs = array_ops.reshape(inputs, inner_input_shape)
     output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
     if output_mask is None:
         if mask is None:
             return None
         # input_mask is not None, and output_mask is None:
         # we should return a not-None mask
         output_mask = mask
         for _ in range(2, len(K.int_shape(mask))):
             output_mask = K.any(output_mask, axis=-1)
     else:
         # output_mask is not None. We need to reshape it
         input_length = input_shape[1]
         if not input_length:
             input_length = K.shape(inputs)[1]
         output_mask_int_shape = K.int_shape(output_mask)
         if output_mask_int_shape is None:
             # if the output_mask does not have a static shape,
             # its shape must be the same as mask's
             if mask is not None:
                 output_mask_int_shape = K.int_shape(mask)
             else:
                 output_mask_int_shape = K.compute_output_shape(
                     input_shape)[:-1]
         output_mask_shape = self._get_shape_tuple(
             (-1, input_length), output_mask, 1, output_mask_int_shape[1:])
         output_mask = K.reshape(output_mask, output_mask_shape)
     return output_mask
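The branch above where the inner layer returns no mask but an input mask exists reduces the input mask to rank 2 by repeatedly applying `any` over the trailing axis. A minimal standalone sketch of that reduction, with assumed shapes (not part of the original example):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

# Assumed input mask of shape (batch=2, timesteps=3, features=4).
mask = tf.constant(np.random.rand(2, 3, 4) > 0.5)

# Collapse every axis beyond the second, as the `output_mask is None`
# branch above does.
output_mask = mask
for _ in range(2, len(K.int_shape(mask))):
    output_mask = K.any(output_mask, axis=-1)

print(output_mask.shape)  # (2, 3): one boolean per (sample, timestep)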
Example 2
    def compute_mask(self, inputs, mask=None):
        """Computes an output mask tensor for Embedding layer.

    This is based on the inputs, mask, and the inner layer.
    If batch size is specified:
    Simply return the input `mask`. (An rnn-based implementation with
    more than one rnn inputs is required but not supported in tf.keras yet.)
    Otherwise we call `compute_mask` of the inner layer at each time step.
    If the output mask at each time step is not `None`:
    (E.g., inner layer is Masking or RNN)
    Concatenate all of them and return the concatenation.
    If the output mask at each time step is `None` and the input mask is not
    `None`:(E.g., inner layer is Dense)
    Reduce the input_mask to 2 dimensions and return it.
    Otherwise (both the output mask and the input mask are `None`):
    (E.g., `mask` is not used at all)
    Return `None`.

    Arguments:
      inputs: Tensor with shape [batch size, timesteps, ...] indicating the
          input to TimeDistributed. If static shape information is available for
          "batch size", `mask` is returned unmodified.
      mask: Either None (indicating no masking) or a Tensor indicating the
          input mask for TimeDistributed. The shape can be static or dynamic.

    Returns:
      Either None (no masking), or a [batch size, timesteps, ...] Tensor with
      an output mask for the TimeDistributed layer with the shape beyond the
      second dimension being the value of the input mask shape(if the computed
      output mask is none), an output mask with the shape beyond the first
      dimension being the value of the mask shape(if mask is not None) or
      output mask with the shape beyond the first dimension being the
      value of the computed output shape.

    """
        # Cases that need layer.compute_mask even when input_mask is None:
        # a Masking layer, or an Embedding layer with mask_zero.
        input_shape = K.int_shape(inputs)
        if input_shape[0]:
            # batch size matters, we currently do not handle mask explicitly
            return mask
        inner_mask = mask
        if inner_mask is not None:
            inner_mask_shape = self._get_shape_tuple((-1, ), mask, 2)
            inner_mask = K.reshape(inner_mask, inner_mask_shape)
        input_uid = generic_utils.object_list_uid(inputs)
        inner_inputs = self._input_map.get(input_uid, inputs)
        output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
        if output_mask is None:
            if mask is None:
                return None
            # input_mask is not None, and output_mask is None:
            # we should return a not-None mask
            output_mask = mask
            for _ in range(2, len(K.int_shape(mask))):
                output_mask = K.any(output_mask, axis=-1)
        else:
            # output_mask is not None. We need to reshape it
            input_length = input_shape[1]
            if not input_length:
                input_length = K.shape(inputs)[1]
            output_mask_int_shape = K.int_shape(output_mask)
            if output_mask_int_shape is None:
                # if the output_mask does not have a static shape,
                # its shape must be the same as mask's
                if mask is not None:
                    output_mask_int_shape = K.int_shape(mask)
                else:
                    output_mask_int_shape = K.compute_output_shape(
                        input_shape)[:-1]
            output_mask_shape = self._get_shape_tuple(
                (-1, input_length), output_mask, 1, output_mask_int_shape[1:])
            output_mask = K.reshape(output_mask, output_mask_shape)
        return output_mask
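A hedged usage sketch of the early exit this version takes when the static batch size is known (`if input_shape[0]:`): the mask produced by an Embedding layer with mask_zero=True is handed back unchanged. The layer sizes and inputs below are made up for illustration:

import tensorflow as tf

ids = tf.constant([[1, 2, 0, 0], [3, 0, 0, 0]])     # 0 is the padding id
embed = tf.keras.layers.Embedding(input_dim=10, output_dim=8, mask_zero=True)
td = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(4))

x = embed(ids)                    # shape (2, 4, 8)
mask = embed.compute_mask(ids)    # (2, 4) boolean mask, True where ids != 0
y = td(x)                         # the mask also propagates implicitly via x._keras_mask
print(td.compute_mask(x, mask))   # the (2, 4) mask comes back unchanged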
Example 3
    def compute_mask(self, inputs, mask=None):
        """Computes an output mask tensor for Embedding layer.

    This is based on the inputs, mask, and the inner layer.
    If batch size is specified:
    Simply return the input `mask`. (An rnn-based implementation with
    more than one rnn inputs is required but not supported in tf.keras yet.)
    Otherwise we call `compute_mask` of the inner layer at each time step.
    If the output mask at each time step is not `None`:
    (E.g., inner layer is Masking or RNN)
    Concatenate all of them and return the concatenation.
    If the output mask at each time step is `None` and the input mask is not
    `None`:(E.g., inner layer is Dense)
    Reduce the input_mask to 2 dimensions and return it.
    Otherwise (both the output mask and the input mask are `None`):
    (E.g., `mask` is not used at all)
    Return `None`.

    Args:
      inputs: Tensor with shape [batch size, timesteps, ...] indicating the
        input to TimeDistributed. If static shape information is available for
        "batch size", `mask` is returned unmodified.
      mask: Either None (indicating no masking) or a Tensor indicating the
        input mask for TimeDistributed. The shape can be static or dynamic.

    Returns:
      Either None (no masking), or a [batch size, timesteps, ...] Tensor with
      an output mask for the TimeDistributed layer with the shape beyond the
      second dimension being the value of the input mask shape(if the computed
      output mask is none), an output mask with the shape beyond the first
      dimension being the value of the mask shape(if mask is not None) or
      output mask with the shape beyond the first dimension being the
      value of the computed output shape.

    """
        # Cases that need layer.compute_mask even when input_mask is None:
        # a Masking layer, or an Embedding layer with mask_zero.
        input_shape = nest.map_structure(
            lambda x: tensor_shape.TensorShape(backend.int_shape(x)), inputs)
        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
        batch_size = tf_utils.convert_shapes(input_shape)
        batch_size = nest.flatten(batch_size)[0]
        is_ragged_input = nest.map_structure(
            lambda x: isinstance(x, ragged_tensor.RaggedTensor), inputs)
        is_ragged_input = generic_utils.to_list(nest.flatten(is_ragged_input))
        if batch_size and not self._always_use_reshape or any(is_ragged_input):
            # Return the mask unchanged when the static batch size is known (and the
            # layer is not forced onto the reshape path), or when the input is ragged;
            # in these cases we do not handle the mask explicitly.
            return mask
        inner_mask = mask
        if inner_mask is not None:
            inner_mask_shape = self._get_shape_tuple((-1, ), mask, 2)
            inner_mask = backend.reshape(inner_mask, inner_mask_shape)
        inner_input_shape = nest.map_structure(
            lambda tensor: self._get_shape_tuple((-1, ), tensor, 2), inputs)
        inner_inputs = nest.map_structure_up_to(inputs, array_ops.reshape,
                                                inputs, inner_input_shape)
        output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
        if output_mask is None:
            if mask is None:
                return None
            # input_mask is not None, and output_mask is None:
            # we should return a not-None mask
            output_mask = mask
            for _ in range(2, len(backend.int_shape(mask))):
                output_mask = backend.any(output_mask, axis=-1)
        else:
            # output_mask is not None. We need to reshape it
            input_length = tf_utils.convert_shapes(input_shape)
            input_length = nest.flatten(input_length)[1]
            if not input_length:
                input_length = nest.map_structure(
                    lambda x: backend.shape(x)[1], inputs)
                input_length = nest.flatten(input_length)[0]
            output_mask_int_shape = backend.int_shape(output_mask)
            if output_mask_int_shape is None:
                # if the output_mask does not have a static shape,
                # its shape must be the same as mask's
                if mask is not None:
                    output_mask_int_shape = backend.int_shape(mask)
                else:
                    input_shape = generic_utils.to_list(
                        nest.flatten(input_shape))[0]
                    output_mask_int_shape = backend.compute_output_shape(
                        input_shape)[:-1]
            output_mask_shape = self._get_shape_tuple(
                (-1, input_length), output_mask, 1, output_mask_int_shape[1:])
            output_mask = backend.reshape(output_mask, output_mask_shape)
        return output_mask
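All variants funnel the inner layer's compute_mask through a single call by folding the batch and time dimensions together. A rough standalone sketch of what the `_get_shape_tuple((-1,), tensor, 2)` reshapes amount to, with assumed shapes:

import tensorflow as tf

# Assumed shapes: inputs (batch=2, timesteps=3, 4, 5), mask (2, 3, 4).
inputs = tf.zeros([2, 3, 4, 5])
mask = tf.ones([2, 3, 4], dtype=tf.bool)

# Fold batch and timesteps together so the inner layer sees one "sample"
# per timestep, mirroring inner_inputs / inner_mask above.
inner_inputs = tf.reshape(inputs, [-1, 4, 5])   # (6, 4, 5)
inner_mask = tf.reshape(mask, [-1, 4])          # (6, 4)
print(inner_inputs.shape, inner_mask.shape)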
Example 4
  def compute_mask(self, inputs, mask=None):
    """Computes an output mask tensor for Embedding layer.

    This is based on the inputs, mask, and the inner layer.
    If batch size is specified:
    Simply return the input `mask`. (An rnn-based implementation with
    more than one rnn inputs is required but not supported in tf.keras yet.)
    Otherwise we call `compute_mask` of the inner layer at each time step.
    If the output mask at each time step is not `None`:
    (E.g., inner layer is Masking or RNN)
    Concatenate all of them and return the concatenation.
    If the output mask at each time step is `None` and the input mask is not
    `None`:(E.g., inner layer is Dense)
    Reduce the input_mask to 2 dimensions and return it.
    Otherwise (both the output mask and the input mask are `None`):
    (E.g., `mask` is not used at all)
    Return `None`.

    Arguments:
      inputs: Tensor with shape [batch size, timesteps, ...] indicating the
        input to TimeDistributed. If static shape information is available for
        "batch size", `mask` is returned unmodified.
      mask: Either None (indicating no masking) or a Tensor indicating the
        input mask for TimeDistributed. The shape can be static or dynamic.

    Returns:
      Either None (no masking), or a [batch size, timesteps, ...] Tensor with
      an output mask for the TimeDistributed layer with the shape beyond the
      second dimension being the value of the input mask shape(if the computed
      output mask is none), an output mask with the shape beyond the first
      dimension being the value of the mask shape(if mask is not None) or
      output mask with the shape beyond the first dimension being the
      value of the computed output shape.

    """
    # Cases that need layer.compute_mask even when input_mask is None:
    # a Masking layer, or an Embedding layer with mask_zero.
    input_shape = K.int_shape(inputs)
    if input_shape[0]:
      # batch size matters, we currently do not handle mask explicitly
      return mask
    inner_mask = mask
    if inner_mask is not None:
      inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
      inner_mask = K.reshape(inner_mask, inner_mask_shape)
    input_uid = generic_utils.object_list_uid(inputs)
    inner_inputs = self._input_map.get(input_uid, inputs)
    output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
    if output_mask is None:
      if mask is None:
        return None
      # input_mask is not None, and output_mask is None:
      # we should return a not-None mask
      output_mask = mask
      for _ in range(2, len(K.int_shape(mask))):
        output_mask = K.any(output_mask, axis=-1)
    else:
      # output_mask is not None. We need to reshape it
      input_length = input_shape[1]
      if not input_length:
        input_length = K.shape(inputs)[1]
      output_mask_int_shape = K.int_shape(output_mask)
      if output_mask_int_shape is None:
        # if the output_mask does not have a static shape,
        # its shape must be the same as mask's
        if mask is not None:
          output_mask_int_shape = K.int_shape(mask)
        else:
          output_mask_int_shape = K.compute_output_shape(input_shape)[:-1]
      output_mask_shape = self._get_shape_tuple(
          (-1, input_length), output_mask, 1, output_mask_int_shape[1:])
      output_mask = K.reshape(output_mask, output_mask_shape)
    return output_mask
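When the inner layer does return a mask, the wrapper has to undo the batch/time folding: the `output_mask is not None` branch reshapes the (batch * timesteps, ...) mask back to (batch, timesteps, ...) using (-1, input_length) as the leading dimensions. A small sketch with assumed shapes:

import tensorflow as tf

batch, timesteps, d1 = 2, 3, 4

# Assumed mask produced by the inner layer on the folded inputs.
inner_output_mask = tf.ones([batch * timesteps, d1], dtype=tf.bool)

# Expand it back to one mask row per (sample, timestep), as the final
# K.reshape / backend.reshape call above does.
output_mask = tf.reshape(inner_output_mask, [-1, timesteps, d1])
print(output_mask.shape)  # (2, 3, 4)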