Example #1
 def testPaddingsNonNegative2(self):
   with self.test_session(use_gpu=True):
     with self.assertRaisesRegexp(ValueError, "must be non-negative"):
       array_ops.pad(constant_op.constant(
           [1], shape=[1]),
                     constant_op.constant(
                         [-1, 0], shape=[1, 2]))
Example #2
 def testInputDims(self):
   with self.test_session(use_gpu=True):
     with self.assertRaises(ValueError):
       array_ops.pad(array_ops.reshape(
           [1, 2], shape=[1, 2, 1, 1, 1, 1]),
                     array_ops.reshape(
                         [1, 2], shape=[1, 2]))
Example #3
  def get_observation_model(self, times):
    """Construct observation model matrix from VARMA parameters.

    Args:
      times: A [batch size] vector indicating the times observation models are
          requested for. Unused.
    Returns:
      the observation model matrix. It has shape
        [self.num_features, self.state_dimension].
    """
    del times  # StateSpaceModel will broadcast along the batch dimension
    if self.ar_order > self.ma_order or self.state_num_blocks < 2:
      return array_ops.pad(
          linalg_ops.eye(self.num_features, dtype=self.dtype),
          [[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
          name="observation_model")
    else:
      # Add a second observed component which "catches" the accumulated moving
      # average errors as they reach the end of the state. If ar_order >
      # ma_order, this is unnecessary, since accumulated errors cycle naturally.
      return array_ops.concat(
          [
              array_ops.pad(
                  linalg_ops.eye(self.num_features, dtype=self.dtype),
                  [[0, 0], [0,
                            self.num_features * (self.state_num_blocks - 2)]]),
              linalg_ops.eye(self.num_features, dtype=self.dtype)
          ],
          axis=1,
          name="observation_model")
Example #4
 def testPaddingsDim4(self):
   with self.test_session(use_gpu=True):
     with self.assertRaises(ValueError):
       array_ops.pad(array_ops.reshape(
           [1, 2], shape=[1, 2]),
                     array_ops.reshape(
                         [1, 2, 3, 4, 5, 6], shape=[3, 2]))
def create_test_network_8():
  """Aligned network for test, including an intermediate addition.

  The graph is similar to create_test_network_1(), except that it includes a few
  more layers on top. The added layers compose two different branches whose
  receptive fields are different. This makes this test case more challenging; in
  particular, this test fails if a naive DFS-like algorithm is used for RF
  computation.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # First addition.
    l4 = nn.relu(l1 + l3)
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='VALID')
    # Right branch after first addition.
    l6_pad = array_ops.pad(l4, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l6 = slim.conv2d(l6_pad, 1, [3, 3], stride=2, scope='L6', padding='VALID')
    # Final addition.
    nn.relu(l5 + l6, name='output')

  return g
 def testPaddingsDim2(self):
   with self.session(use_gpu=True):
     with self.assertRaises(ValueError):
       array_ops.pad(array_ops.reshape(
           [1, 2], shape=[1, 2]),
                     array_ops.reshape(
                         [1, 2], shape=[2, 1]))
  def _makeTridiagonalMatrix(self, superdiag, maindiag, subdiag):
    super_pad = [[0, 0], [0, 1], [1, 0]]
    sub_pad = [[0, 0], [1, 0], [0, 1]]

    super_part = array_ops.pad(array_ops.matrix_diag(superdiag), super_pad)
    main_part = array_ops.matrix_diag(maindiag)
    sub_part = array_ops.pad(array_ops.matrix_diag(subdiag), sub_pad)
    return super_part + main_part + sub_part
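
The pads shift the diagonalized superdiagonal one column to the right and the subdiagonal one column to the left, so the three parts sum to a tridiagonal matrix. A hedged NumPy sketch of the same construction for a single (non-batched) matrix, with made-up values:

import numpy as np

maindiag = np.array([5., 6., 7.])
superdiag = np.array([1., 2.])   # one entry shorter, so the padded parts line up
subdiag = np.array([8., 9.])

tridiag = (np.diag(maindiag)
           + np.diag(superdiag, k=1)    # shifted one column right
           + np.diag(subdiag, k=-1))    # shifted one column left
# tridiag == [[5., 1., 0.], [8., 6., 2.], [0., 9., 7.]]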
 def baseline(self, upper, diag, lower, vec):
   diag_part = array_ops.expand_dims(diag, -1) * vec
   lower_part = array_ops.pad(
       array_ops.expand_dims(lower[:, 1:], -1) * vec[:, :-1, :],
       [[0, 0], [1, 0], [0, 0]])
   upper_part = array_ops.pad(
       array_ops.expand_dims(upper[:, :-1], -1) * vec[:, 1:, :],
       [[0, 0], [0, 1], [0, 0]])
   return lower_part + diag_part + upper_part
 def testPaddingsMaximum(self):
   with self.test_session(use_gpu=True):
     with self.assertRaises(Exception):
       array_ops.pad(
           constant_op.constant([1], shape=[2]),
           constant_op.constant([2, 0], shape=[1, 2]),
           mode="REFLECT").eval()
     with self.assertRaises(Exception):
       array_ops.pad(
           constant_op.constant([1], shape=[2]),
           constant_op.constant([0, 3], shape=[1, 2]),
           mode="SYMMETRIC").eval()
Example #10
  def testShapeFunctionEdgeCases(self):
    # Unknown paddings shape.
    inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
    padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
    self.assertEqual([None, None, None, None], padded.get_shape().as_list())

    # Unknown input shape.
    inp = array_ops.placeholder(dtypes.float32)
    padded = array_ops.pad(inp, [[2, 2], [2, 2]])
    self.assertEqual([None, None], padded.get_shape().as_list())

    # Unknown input and paddings shape.
    inp = array_ops.placeholder(dtypes.float32)
    padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
    self.assertAllEqual(None, padded.get_shape().ndims)
Example #11
def insert_slice_in_zeros(slice_to_insert, dim, dim_size, position):
  """Inserts slice into a larger tensor of zeros.

  Forms a new tensor which is the same shape as slice_to_insert, except that
  the dimension given by 'dim' is expanded to the size given by 'dim_size'.
  'position' determines the position (index) at which to insert the slice within
  that dimension.

  Assumes slice_to_insert.shape[dim] = 1.

  Args:
    slice_to_insert: The slice to insert.
    dim: The dimension which to expand with zeros.
    dim_size: The new size of the 'dim' dimension.
    position: The position of 'slice_to_insert' in the new tensor.

  Returns:
    The new tensor.

  Raises:
    ValueError: If the slice's shape at the given dim is not 1.
  """
  slice_shape = slice_to_insert.shape
  if slice_shape[dim] != 1:
    raise ValueError("Expected slice_to_insert.shape to have {} dim of 1, but "
                     "was {}".format(dim, slice_to_insert.shape[dim]))

  before = [0] * int(len(slice_shape))
  after = before[:]
  before[dim] = position
  after[dim] = dim_size - position - 1

  return array_ops.pad(slice_to_insert, list(zip(before, after)))
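
As a quick illustration of what the final pad produces, here is a small NumPy sketch with made-up values (dim 0, dim_size 5, position 2):

import numpy as np

slice_to_insert = np.array([[7., 8., 9.]])   # shape (1, 3); shape[dim] == 1
dim, dim_size, position = 0, 5, 2

before = [0] * slice_to_insert.ndim
after = before[:]
before[dim] = position
after[dim] = dim_size - position - 1

result = np.pad(slice_to_insert, list(zip(before, after)))
# result has shape (5, 3); row 2 is [7., 8., 9.] and every other row is zeros.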
  def testPadWithNonConstPaddings(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings = array_ops.placeholder(dtype='int32')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)

      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                paddings: paddings_val
            })

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def inverse_stft_window_fn_inner(frame_length, dtype):
    """Computes a window that can be used in `inverse_stft`.

    Args:
      frame_length: An integer scalar `Tensor`. The window length in samples.
      dtype: Data type of waveform passed to `stft`.

    Returns:
      A window suitable for reconstructing original waveform in `inverse_stft`.

    Raises:
      ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
      callable that takes a window length and a `dtype` keyword argument and
      returns a `[window_length]` `Tensor` of samples in the provided datatype,
      or `frame_step` is not scalar.
    """
    with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
      frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
      frame_length.shape.assert_has_rank(0)

      # Use equation 7 from Griffin + Lim.
      forward_window = forward_window_fn(frame_length, dtype=dtype)
      denom = math_ops.square(forward_window)
      overlaps = -(-frame_length // frame_step)  # Ceiling division.
      denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
      denom = array_ops.reshape(denom, [overlaps, frame_step])
      denom = math_ops.reduce_sum(denom, 0, keep_dims=True)
      denom = array_ops.tile(denom, [overlaps, 1])
      denom = array_ops.reshape(denom, [overlaps * frame_step])

      return forward_window / denom[:frame_length]
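
The denominator above is the sum of the squared forward window over every frame that overlaps a given sample offset (Griffin & Lim, eq. 7): the squared window is padded to a whole number of hops, folded into [overlaps, frame_step], and summed. A rough NumPy sketch of that folding, using np.hanning as a stand-in for forward_window_fn and illustrative frame sizes:

import numpy as np

frame_length, frame_step = 8, 2
forward_window = np.hanning(frame_length)

denom = forward_window ** 2
overlaps = -(-frame_length // frame_step)                 # ceiling division
denom = np.pad(denom, (0, overlaps * frame_step - frame_length))
denom = denom.reshape(overlaps, frame_step).sum(axis=0)   # overlap-add of squared windows
denom = np.tile(denom, overlaps)[:frame_length]

inverse_window = forward_window / denom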
  def testFusePadAndConv(self):
    with self.cached_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.cached_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("ResizeBilinear", node.op)
  def pack_uint8_r2_to_uint32(self, test_input):
    num_rows, num_columns = test_input.get_shape().as_list()
    num_output_columns = int(math.ceil(num_columns / 4.0))
    padding_input = array_ops.pad(
        math_ops.cast(test_input, dtype=dtypes.uint8),
        constant_op.constant([[
            0,
            0,
        ], [0, num_output_columns * 4 - num_columns]]))
    output = array_ops.zeros([num_rows, num_output_columns],
                             dtype=dtypes.uint32)
    num_elements_per_pack = 4
    shift_bits = 8

    iota_r1 = math_ops.range(num_output_columns * num_elements_per_pack)

    for p in range(num_elements_per_pack):
      selected_index = math_ops.equal(
          math_ops.mod(iota_r1, num_elements_per_pack), p)
      gather_index = array_ops.boolean_mask(iota_r1, selected_index)
      gathered_input = array_ops.gather(padding_input, gather_index, axis=1)
      total_shift_bits = shift_bits * (num_elements_per_pack - p - 1)
      left_shift_input = bitwise_ops.left_shift(
          math_ops.cast(gathered_input, dtype=dtypes.uint32), total_shift_bits)
      output = bitwise_ops.bitwise_or(output, left_shift_input)
    return output
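
The loop packs each group of four uint8 values into one uint32, with the first byte of the group ending up in the most significant position. A small NumPy sketch of the same packing for a single row, with made-up input values:

import numpy as np

row = np.array([1, 2, 3, 4, 5], dtype=np.uint8)
num_output_columns = -(-row.size // 4)                    # ceil(columns / 4)
padded = np.pad(row, (0, num_output_columns * 4 - row.size)).astype(np.uint32)

packed = np.zeros(num_output_columns, dtype=np.uint32)
for p in range(4):
    packed |= padded[p::4] << np.uint32(8 * (4 - p - 1))
# packed == [0x01020304, 0x05000000]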
Example #16
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME' padding.

  When stride > 1, we do explicit zero-padding, followed by conv2d with
  'VALID' padding.

  Note that

     net = conv2d_same(inputs, num_outputs, 3, stride=stride)

  is equivalent to

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
     padding='SAME')
     net = subsample(net, factor=stride)

  whereas

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
     padding='SAME')

  is different when the input's height or width is even, which is why we add the
  current function. For more details, see ResnetUtilsTest.testConv2DSameEven().

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An integer, the kernel size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels] with
      the convolution output.
  """
  if stride == 1:
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        rate=rate,
        padding='SAME',
        scope=scope)
  else:
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = array_ops.pad(
        inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=stride,
        rate=rate,
        padding='VALID',
        scope=scope)
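
For a concrete feel of the padding amounts: with kernel_size=3 and rate=2 the effective kernel size is 3 + 2*1 = 5, so pad_total is 4 and the input is padded by 2 on each side; with kernel_size=4 and rate=1, pad_total is 3, split as 1 before and 2 after. A minimal sketch of just that arithmetic:

def same_padding_amounts(kernel_size, rate=1):
  """Explicit (pad_beg, pad_end) used by conv2d_same before a VALID conv."""
  kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
  pad_total = kernel_size_effective - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  return pad_beg, pad_end

assert same_padding_amounts(3, rate=2) == (2, 2)
assert same_padding_amounts(4) == (1, 2)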
def create_test_network_2():
  """Aligned network for test.

  The graph corresponds to a variation of the example from the second figure in
  go/cnn-rf-computation#arbitrary-computation-graphs. Layers 2 and 3 are changed
  to max-pooling operations. Since the functionality is the same as convolution,
  the network is aligned and the receptive field size is the same as from the
  network created using create_test_network_1().

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.max_pool2d(l2_pad, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    nn.relu(l1 + l3, name='output')
  return g
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  return g
  def testPadWithConstPaddings(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      paddings = constant_op.constant(
          paddings_val, dtype='int32', name='PaddingsConst')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)

      with session.Session() as sess:
        output_val_ref = sess.run(output)

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Pad-0-0', nodes)
      self.assertIn('LayoutOptimizer-Pad-PaddingsConst', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Example #20
def frames(signal, frame_length, frame_step, name=None):
  """Frame a signal into overlapping frames.

  May be used in front of spectral functions.

  For example:

  ```python
  pcm = tf.placeholder(tf.float32, [None, 9152])
  frames = tf.contrib.signal.frames(pcm, 512, 180)
  magspec = tf.abs(tf.spectral.rfft(frames, [512]))
  image = tf.expand_dims(magspec, 3)
  ```

  Args:
    signal: A `Tensor` of shape `[batch_size, signal_length]`.
    frame_length: An `int32` or `int64` `Tensor`. The length of each frame.
    frame_step: An `int32` or `int64` `Tensor`. The step between frames.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of frames with shape `[batch_size, num_frames, frame_length]`.

  Raises:
    ValueError: if signal does not have rank 2.
  """
  with ops.name_scope(name, "frames", [signal, frame_length, frame_step]):
    signal = ops.convert_to_tensor(signal, name="signal")
    frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
    frame_step = ops.convert_to_tensor(frame_step, name="frame_step")

    signal_rank = signal.shape.ndims

    if signal_rank != 2:
      raise ValueError("expected signal to have rank 2 but was " + signal_rank)

    signal_length = array_ops.shape(signal)[1]

    num_frames = math_ops.ceil((signal_length - frame_length) / frame_step)
    num_frames = 1 + math_ops.cast(num_frames, dtypes.int32)

    pad_length = (num_frames - 1) * frame_step + frame_length
    pad_signal = array_ops.pad(signal, [[0, 0], [0,
                                                 pad_length - signal_length]])

    indices_frame = array_ops.expand_dims(math_ops.range(frame_length), 0)
    indices_frames = array_ops.tile(indices_frame, [num_frames, 1])

    indices_step = array_ops.expand_dims(
        math_ops.range(num_frames) * frame_step, 1)
    indices_steps = array_ops.tile(indices_step, [1, frame_length])

    indices = indices_frames + indices_steps

    # TODO(androbin): remove `transpose` when `gather` gets `axis` support
    pad_signal = array_ops.transpose(pad_signal)
    signal_frames = array_ops.gather(pad_signal, indices)
    signal_frames = array_ops.transpose(signal_frames, perm=[2, 0, 1])

    return signal_frames
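
A hedged NumPy sketch of the same framing logic for a tiny signal (sizes and values chosen only for illustration):

import numpy as np

signal = np.arange(10).reshape(1, 10)          # [batch_size=1, signal_length=10]
frame_length, frame_step = 4, 3

signal_length = signal.shape[1]
num_frames = 1 + int(np.ceil((signal_length - frame_length) / frame_step))
pad_length = (num_frames - 1) * frame_step + frame_length
padded = np.pad(signal, [(0, 0), (0, pad_length - signal_length)])

indices = (np.arange(frame_length)[None, :] +
           (np.arange(num_frames) * frame_step)[:, None])
signal_frames = padded[:, indices]             # shape [1, num_frames, frame_length]
# signal_frames[0] == [[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]]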
def _build_multilabel_adjacency(sparse_labels):
  """Builds multilabel adjacency matrix.

  As of March 14th, 2017, there's no op for the dot product between
  two sparse tensors in TF. However, there is a `sparse_minimum` op, which is
  equivalent to an AND op between two sparse boolean tensors.
  This computes the dot product between two sparse boolean inputs.

  Args:
    sparse_labels: List of 1-D boolean sparse tensors.

  Returns:
    adjacency_matrix: 2-D dense `Tensor`.
  """
  num_pairs = len(sparse_labels)
  adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
  for i in range(num_pairs):
    for j in range(num_pairs):
      sparse_dot_product = math_ops.to_float(
          sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
              sparse_labels[i], sparse_labels[j])))
      sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
      sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
      one_hot_matrix = array_ops.pad(sparse_dot_product,
                                     [[i, num_pairs-i-1],
                                      [j, num_pairs-j-1]], 'CONSTANT')
      adjacency_matrix += one_hot_matrix

  return adjacency_matrix
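
The pad is what scatters each scalar dot product into position (i, j): padding a 1x1 tensor with i rows above, num_pairs - i - 1 rows below, j columns to the left and num_pairs - j - 1 columns to the right yields a matrix that is zero everywhere except at (i, j). A small NumPy sketch of that step (illustrative sizes):

import numpy as np

num_pairs, i, j = 4, 1, 2
sparse_dot_product = np.array([[1.0]])        # the scalar, expanded to shape (1, 1)

one_hot_matrix = np.pad(sparse_dot_product,
                        [(i, num_pairs - i - 1), (j, num_pairs - j - 1)])
# one_hot_matrix is 4x4 and zero everywhere except one_hot_matrix[1, 2] == 1.0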
Example #22
  def testPad(self):
    for dtype in self.numeric_types:
      self._testBinary(
          array_ops.pad,
          np.array(
              [[1, 2, 3], [4, 5, 6]], dtype=dtype),
          np.array(
              [[1, 2], [2, 1]], dtype=np.int32),
          expected=np.array(
              [[0, 0, 0, 0, 0, 0],
               [0, 0, 1, 2, 3, 0],
               [0, 0, 4, 5, 6, 0],
               [0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0]],
              dtype=dtype))

      self._testBinary(
          lambda x, y: array_ops.pad(x, y, constant_values=7),
          np.array(
              [[1, 2, 3], [4, 5, 6]], dtype=dtype),
          np.array(
              [[0, 3], [2, 1]], dtype=np.int32),
          expected=np.array(
              [[7, 7, 1, 2, 3, 7],
               [7, 7, 4, 5, 6, 7],
               [7, 7, 7, 7, 7, 7],
               [7, 7, 7, 7, 7, 7],
               [7, 7, 7, 7, 7, 7]],
              dtype=dtype))
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  name_to_node = graph_compute_order.parse_graph_nodes(g.as_graph_def())
  return name_to_node
 def _testPad(self, np_inputs, paddings, mode):
   np_val = self._npPad(np_inputs, paddings, mode=mode)
   with self.test_session(use_gpu=True):
     tf_val = array_ops.pad(np_inputs, paddings, mode=mode)
     out = tf_val.eval()
   self.assertAllEqual(np_val, out)
   self.assertShapeEqual(np_val, tf_val)
def create_test_network_7():
  """Aligned network for test, with a control dependency.

  The graph is similar to create_test_network_1(), except that it includes an
  assert operation on the left branch.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An 8x8 test image.
    x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    l1_shape = array_ops.shape(l1)
    assert_op = control_flow_ops.Assert(
        gen_math_ops.equal(l1_shape[1], 2), [l1_shape], summarize=4)
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    with ops.control_dependencies([assert_op]):
      nn.relu(l1 + l3, name='output')
  return g
Example #26
 def testCollapseAdjacentNonPaddedDimensions(self):
   # pyformat: disable
   paddings_values = [[[0, 0], [0, 0], [0, 0], [0, 1]],
                      [[0, 0], [2, 3], [0, 0], [0, 0]],
                      [[0, 0], [0, 0], [0, 0], [0, 0]]]
   # pyformat: enable
   for paddings_value in paddings_values:
     for dtype in [dtypes.float32, dtypes.int32]:
       inp = constant_op.constant(1, shape=[8, 28, 28, 3], dtype=dtype)
       paddings = constant_op.constant(paddings_value, dtype=dtypes.int32)
       padded = array_ops.pad(inp, paddings)
       middle = array_ops.slice(padded, [row[0] for row in paddings_value],
                                [dim.value for dim in inp.shape.dims])
       left = array_ops.slice(padded, [0, 0, 0, 0],
                              [row[0] for row in paddings_value])
       right = array_ops.slice(
           padded,
           [paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)],
           [-1, -1, -1, -1])
       with self.test_session(use_gpu=True):
         self.assertAllEqual(inp.eval(), middle.eval())
         self.assertAllEqual(
             np.zeros([row[0] for row in paddings_value]), left.eval())
         self.assertAllEqual(
             np.zeros([row[1] for row in paddings_value]), right.eval())
  def _forward(self, x):
    # Pad the last dim with a zeros vector. We need this because it lets us
    # infer the scale in the inverse function.
    y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
    ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
             else array_ops.rank(y))
    y = array_ops.pad(y,
                      paddings=array_ops.concat(
                          (array_ops.zeros(
                              (ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
                          0))

    # Set shape hints.
    if x.get_shape().ndims is not None:
      shape = x.get_shape().as_list()
      if self._static_event_ndims == 0:
        shape += [2]
      elif shape[-1] is not None:
        shape[-1] += 1
      shape = tensor_shape.TensorShape(shape)
      y.get_shape().assert_is_compatible_with(shape)
      y.set_shape(shape)

    # Since we only support event_ndims in [0, 1] and we do padding, we always
    # reduce over the last dimension, i.e., dim=-1 (which is the default).
    return nn_ops.softmax(y)
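
Appending a zero before the softmax means the extra coordinate carries the normalizer, so the inverse transform can recover x from the log-ratio against that padded slot. A tiny NumPy check of that round trip (illustrative values):

import numpy as np

x = np.array([0.5, -1.0])                      # unconstrained inputs
y = np.exp(np.append(x, 0.0))
y /= y.sum()                                   # softmax of the padded vector [x, 0]
x_recovered = np.log(y[:-1]) - np.log(y[-1])   # log-ratio against the padded coordinate
# np.allclose(x, x_recovered) -> True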
Example #28
 def get_noise_transform(self):
   # transition_power_noise_accumulator makes assumptions about this
   # transformation. If the noise transform is modified or overridden,
   # transition_power_noise_accumulator must be modified as well (or discarded,
   # as it is simply an optimization).
   return array_ops.pad(
       array_ops.ones([1], dtype=self.dtype),
       paddings=[(0, self._periodicity - 2)])[..., None]
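
This produces a [periodicity - 1, 1] column vector whose first entry is one and whose remaining entries are zero. A one-line NumPy sketch with an illustrative periodicity:

import numpy as np

periodicity = 5  # illustrative value
noise_transform = np.pad(np.ones([1]), (0, periodicity - 2))[..., None]
# noise_transform.shape == (4, 1) and equals [[1.], [0.], [0.], [0.]]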
Example #29
 def testScalars(self):
   paddings = np.zeros((0, 2), dtype=np.int32)
   inp = np.asarray(7)
   with self.test_session(use_gpu=True):
     tf_val = array_ops.pad(inp, paddings)
     out = tf_val.eval()
   self.assertAllEqual(inp, out)
   self.assertShapeEqual(inp, tf_val)
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
  """Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
  fft_shape = _tensor_util.constant_value_as_shape(fft_length)

  # Edge case: skip padding empty tensors.
  if (input_tensor.shape.ndims is not None and
      any(dim.value == 0 for dim in input_tensor.shape.dims)):
    return input_tensor

  # If we know the shapes ahead of time, we can either skip or pre-compute the
  # appropriate paddings. Otherwise, fall back to computing paddings in
  # TensorFlow.
  if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
    # Slice the last FFT-rank dimensions from input_tensor's shape.
    input_fft_shape = input_tensor.shape[-fft_shape.ndims:]

    if input_fft_shape.is_fully_defined():
      # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
      if is_reverse:
        fft_shape = fft_shape[:-1].concatenate(
            fft_shape.dims[-1].value // 2 + 1)

      paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
                  for fft_dim, input_dim in zip(
                      fft_shape.dims, input_fft_shape.dims)]
      if any(pad > 0 for _, pad in paddings):
        outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
                                         fft_shape.ndims), 0)
        return _array_ops.pad(input_tensor, outer_paddings + paddings)
      return input_tensor

  # If we can't determine the paddings ahead of time, then we have to pad. If
  # the paddings end up as zero, tf.pad has a special-case that does no work.
  input_rank = _array_ops.rank(input_tensor)
  input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
  outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
  outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
  # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
  if is_reverse:
    fft_length = _array_ops.concat([fft_length[:-1],
                                    fft_length[-1:] // 2 + 1], 0)
  fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
  paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
  paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
                              axis=1)
  return _array_ops.pad(input_tensor, paddings)
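
In the fully-defined-shape branch the effect is simply to zero-pad the trailing fft_rank dimensions up to fft_length, leaving any outer batch dimensions untouched. A hedged NumPy illustration with made-up shapes:

import numpy as np

input_tensor = np.ones((2, 5, 6))   # one batch dimension, inner FFT dims 5x6
fft_length = (8, 8)                 # target size for the inner two dims
fft_rank = len(fft_length)

paddings = [(0, 0)] * (input_tensor.ndim - fft_rank) + [
    (0, max(n - d, 0))
    for n, d in zip(fft_length, input_tensor.shape[-fft_rank:])]
padded = np.pad(input_tensor, paddings)
# padded.shape == (2, 8, 8); the added rows and columns are zeros.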