Code example #1
0
 def _test(self, input_shape, block_shape, base_paddings):
     """Check required_space_to_batch_paddings with constant and fed inputs.

     Computes paddings/crops once from constant inputs (which must fold to
     concrete values) and once from placeholders fed at run time, and
     asserts both paths agree.
     """
     input_shape = np.array(input_shape)
     block_shape = np.array(block_shape)
     if base_paddings is not None:
         base_paddings = np.array(base_paddings)

     # Constant path: the op's outputs must be statically foldable.
     const_paddings, const_crops = array_ops.required_space_to_batch_paddings(
         input_shape, block_shape, base_paddings)
     folded_paddings = tensor_util.constant_value(const_paddings)
     folded_crops = tensor_util.constant_value(const_crops)
     self.assertIsNotNone(folded_paddings)
     self.assertIsNotNone(folded_crops)
     self._checkProperties(input_shape, block_shape, base_paddings,
                           folded_paddings, folded_crops)

     # Placeholder path: same computation, inputs supplied via a feed dict.
     feed = {}
     shape_ph = array_ops.placeholder(dtypes.int32)
     feed[shape_ph] = input_shape
     block_ph = array_ops.placeholder(dtypes.int32, [len(block_shape)])
     feed[block_ph] = block_shape
     pad_ph = None
     if base_paddings is not None:
         pad_ph = array_ops.placeholder(dtypes.int32, [len(block_shape), 2])
         feed[pad_ph] = base_paddings
     dyn_paddings, dyn_crops = array_ops.required_space_to_batch_paddings(
         shape_ph, block_ph, pad_ph)
     with self.cached_session():
         paddings_value = dyn_paddings.eval(feed)
         crops_value = dyn_crops.eval(feed)

     # Dynamic results must match the statically-folded ones.
     self.assertAllEqual(paddings_value, folded_paddings)
     self.assertAllEqual(crops_value, folded_crops)
Code example #2
0
    def call(self, inputs):
        """Preprocess `inputs` and produce the layer's configured output.

        Converts list/tuple/ndarray input to a tensor, runs `_preprocess`,
        and then either returns the preprocessed value (no output mode),
        the raw lookup result, or — in INT mode — the lookup result
        trimmed/padded to `self._output_sequence_length`.
        """
        if isinstance(inputs, (list, tuple, np.ndarray)):
            inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)

        inputs = self._preprocess(inputs)

        # If we're not doing any output processing, return right away.
        if self._output_mode is None:
            return inputs

        # NOTE(review): _index_lookup_layer presumably maps tokens to
        # integer ids — confirm against the layer's build/config.
        lookup_data = self._index_lookup_layer(inputs)
        if self._output_mode == INT:

            # Maybe trim the output (NOOP if self._output_sequence_length is None).
            output_tensor = lookup_data[..., :self._output_sequence_length]

            # Patch the static last dimension to the requested length (may
            # be None, meaning "leave unconstrained").
            output_shape = output_tensor.shape.as_list()
            output_shape[-1] = self._output_sequence_length

            # If it is a ragged tensor, convert it to dense with correct shape.
            if tf_utils.is_ragged(output_tensor):
                return output_tensor.to_tensor(default_value=0,
                                               shape=output_shape)

            # Dense input and no target length: nothing to trim or pad.
            if self._output_sequence_length is None:
                return output_tensor

            # Compute the right-padding needed to reach output_shape.
            # NOTE(review): output_shape is passed in the block_shape
            # position of required_space_to_batch_paddings — unusual usage;
            # confirm this yields the intended per-dimension paddings.
            padding, _ = array_ops.required_space_to_batch_paddings(
                output_tensor.shape, output_shape)
            return array_ops.pad(output_tensor, padding)

        return lookup_data
Code example #3
0
 def _test(self, input_shape, block_shape, base_paddings):
   """Check required_space_to_batch_paddings with constant and fed inputs.

   Verifies that the op's outputs fold to constants when given constant
   inputs, that those constants satisfy `_checkProperties`, and that the
   placeholder-fed (non-constant) path produces identical values.
   """
   input_shape = np.array(input_shape)
   block_shape = np.array(block_shape)
   if base_paddings is not None:
     base_paddings = np.array(base_paddings)
   # Check with constants.
   paddings, crops = array_ops.required_space_to_batch_paddings(input_shape,
                                                                block_shape,
                                                                base_paddings)
   paddings_const = tensor_util.constant_value(paddings)
   crops_const = tensor_util.constant_value(crops)
   self.assertIsNotNone(paddings_const)
   self.assertIsNotNone(crops_const)
   self._checkProperties(input_shape, block_shape, base_paddings,
                         paddings_const, crops_const)
   # Check with non-constants.
   assignments = {}
   input_shape_placeholder = array_ops.placeholder(dtypes.int32)
   assignments[input_shape_placeholder] = input_shape
   block_shape_placeholder = array_ops.placeholder(dtypes.int32,
                                                   [len(block_shape)])
   assignments[block_shape_placeholder] = block_shape
   if base_paddings is not None:
     base_paddings_placeholder = array_ops.placeholder(dtypes.int32,
                                                       [len(block_shape), 2])
     assignments[base_paddings_placeholder] = base_paddings
   else:
     base_paddings_placeholder = None
   t_paddings, t_crops = array_ops.required_space_to_batch_paddings(
       input_shape_placeholder, block_shape_placeholder,
       base_paddings_placeholder)
   # test_session() is deprecated; cached_session() is its supported
   # replacement and reuses one session across the test case.
   with self.cached_session():
     paddings_result = t_paddings.eval(assignments)
     crops_result = t_crops.eval(assignments)
   self.assertAllEqual(paddings_result, paddings_const)
   self.assertAllEqual(crops_result, crops_const)
Code example #4
0
  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Call functionality for with_space_to_batch.

    Implements a dilated op as: space_to_batch_nd -> self.op ->
    batch_to_space_nd, computing the paddings/crops required so the
    spatial dims are divisible by the dilation rate.
    """
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to the dynamic shape when any spatial dim is unknown.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])

    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)

    # Expand the spatial-only values to full-rank values: rate defaults to 1
    # and paddings/crops default to 0 on non-spatial dims.
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)

    result = self.op(input_converted, filter)

    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)

    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      # NOTE(review): shape[1].value implies TF1-style Dimension objects;
      # under TF2 shape[1] is already an int or None — confirm which API
      # this file targets.
      if not result_converted.shape[1].value and filter is not None:
        # Channel count equals the filter's last dimension (output channels).
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)

    return result_converted