def testFeedIndexedSlicesWithoutDenseShape(self):
   with session.Session() as s:
     values = np.array([1.0, 2.0]).astype(np.float32)
     indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
     dense_shape = None
     ind = ops.IndexedSlices(
         array_ops.placeholder(dtype=np.float32,
                               shape=(2,)),
         array_ops.placeholder(dtype=np.int64,
                               shape=(2, 3)),
         None)
     ind_values = array_ops.identity(ind.values)
     ind_indices = array_ops.identity(ind.indices)
     ind2 = ops.IndexedSlices(ind_values, ind_indices)
     # Feed with tuple
     values_out, indices_out = s.run(
         [ind_values, ind_indices], {ind: (values, indices)})
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(indices_out, indices)
     # Feed with IndexedSlicesValue
     values_out, indices_out = s.run(
         [ind_values, ind_indices],
         {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(indices_out, indices)
     # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
     ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                         dense_shape)})
     self.assertAllEqual(ind2_out.values, values)
     self.assertAllEqual(ind2_out.indices, indices)
     self.assertAllEqual(ind2_out.dense_shape, dense_shape)
Example #2
  def test_raw_tf_compatibility(self):
    # test calling layers/models on TF tensors
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')

    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)

    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    tf_model = keras.models.Model([j, k], [m, n])

    j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
    k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
    m_tf, n_tf = tf_model([j_tf, k_tf])
    self.assertListEqual(m_tf.get_shape().as_list(), [None, 64])
    self.assertListEqual(n_tf.get_shape().as_list(), [None, 5])

    # test merge
    keras.layers.concatenate([j_tf, k_tf], axis=1)
    keras.layers.add([j_tf, k_tf])

    # test tensor input
    x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
    keras.layers.InputLayer(input_tensor=x)

    x = keras.layers.Input(tensor=x)
    keras.layers.Dense(2)(x)
Example #3
  def _testReduction(self,
                     tf_reduce_fn,
                     np_reduce_fn,
                     dtype,
                     test_inputs,
                     rtol=1e-4,
                     atol=1e-4):
    """Tests that the output of 'tf_reduce_fn' matches numpy's output."""

    for test_input in test_inputs:
      with self.test_session() as sess:
        with self.test_scope():
          a = array_ops.placeholder(dtype)
          index = array_ops.placeholder(dtypes.int32)
          out = tf_reduce_fn(a, index)
        result = sess.run(out, {a: test_input, index: [0]})
        self.assertAllClose(
            result, np_reduce_fn(test_input, axis=0), rtol=rtol, atol=atol)

        result = sess.run(out, {a: test_input, index: [1]})
        self.assertAllClose(
            result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol)

        result = sess.run(out, {a: test_input, index: [-1]})
        self.assertAllClose(
            result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol)

        with self.assertRaisesWithPredicateMatch(
            errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
          sess.run(out, {a: test_input, index: [-33]})

        with self.assertRaisesWithPredicateMatch(
            errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
          sess.run(out, {a: test_input, index: [2]})
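A hypothetical invocation of the helper above, shown as a comment; the values are illustrative only and assume the surrounding XLA reduction test class:
# self._testReduction(math_ops.reduce_sum, np.sum, np.float32,
#                     test_inputs=[np.arange(6.0).reshape(2, 3).astype(np.float32)])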
  def testPaddedBatchDatasetShapeSpecifications(self):
    int_placeholder = array_ops.placeholder(dtypes.int32)
    float_placeholder = array_ops.placeholder(dtypes.float32)
    string_placeholder = array_ops.placeholder(dtypes.string)
    input_dataset = dataset_ops.Dataset.from_tensors(
        (int_placeholder, float_placeholder, string_placeholder))

    # Test different ways of specifying the `padded_shapes` argument.
    dynamic_padding_from_tensor_shapes = input_dataset.padded_batch(
        32,
        padded_shapes=(tensor_shape.TensorShape([None]),
                       tensor_shape.TensorShape([None, None]),
                       tensor_shape.TensorShape([37])))
    dynamic_padding_from_lists = input_dataset.padded_batch(
        32, padded_shapes=([None], [None, None], [37]))
    dynamic_padding_from_lists_with_minus_one = input_dataset.padded_batch(
        32, padded_shapes=([-1], [-1, -1], [37]))
    dynamic_padding_from_tensors = input_dataset.padded_batch(
        32,
        padded_shapes=(constant_op.constant([-1], dtype=dtypes.int64),
                       constant_op.constant([-1, -1], dtype=dtypes.int64),
                       constant_op.constant([37], dtype=dtypes.int64)))

    for dataset in [dynamic_padding_from_tensor_shapes,
                    dynamic_padding_from_lists,
                    dynamic_padding_from_lists_with_minus_one,
                    dynamic_padding_from_tensors]:
      self.assertEqual([None, None], dataset.output_shapes[0].as_list())
      self.assertEqual([None, None, None], dataset.output_shapes[1].as_list())
      self.assertEqual([None, 37], dataset.output_shapes[2].as_list())
 def testAutoPack(self):
   with self.test_session():
     h = array_ops.placeholder(dtypes_lib.int32, shape=[])
     w = array_ops.placeholder(dtypes_lib.int32, shape=[])
     z = array_ops.ones([h, w])
     out = z.eval(feed_dict={h: 4, w: 16})
   self.assertAllEqual(out, np.array([[1] * 16] * 4))
Example #6
  def testCustomGradient(self):
    dtype = dtypes.float32

    @function.Defun(dtype, dtype, dtype)
    def XentLossGrad(logits, labels, dloss):
      dlogits = array_ops.reshape(dloss, [-1, 1]) * (
          nn_ops.softmax(logits) - labels)
      dlabels = array_ops.zeros_like(labels)
      # Takes exp(dlogits) to differentiate it from the "correct" gradient.
      return math_ops.exp(dlogits), dlabels

    @function.Defun(dtype, dtype, grad_func=XentLossGrad)
    def XentLoss(logits, labels):
      return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
                                 1)

    g = ops.Graph()
    with g.as_default():
      logits = array_ops.placeholder(dtype)
      labels = array_ops.placeholder(dtype)
      loss = XentLoss(logits, labels)
      dlogits = gradients_impl.gradients([loss], [logits])

    x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
    y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    for cfg in _OptimizerOptions():
      tf_logging.info("cfg = %s", cfg)
      with session.Session(graph=g, config=cfg) as sess:
        out, = sess.run(dlogits, {logits: x, labels: y})
      self.assertAllClose(out, np.exp(prob - y))
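For reference: the loss is a per-example vector, so the incoming dloss is 1 for every example and the registered XentLossGrad returns exp(1 * (softmax(x) - y)) = exp(prob - y), which is exactly what the assertion compares against. A minimal standalone numpy sketch (not part of the original test):
import numpy as np

x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=True)  # row-wise softmax(x)
reference = np.exp(prob - y)  # the value the TF gradient should reproduce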
Example #7
  def _testReduceSum(self,
                     expected_result,
                     dtype,
                     test_inputs,
                     rtol=1e-3,
                     atol=1e-4):
    """Tests reduce sum on a list of input arrays.

    For each array in test_inputs, check that performing reduce sum on the array
    produces a value that is close to the expected result.

    Args:
      expected_result: the expected result.
      dtype: the data type of the reduce sum operation.
      test_inputs: a list of input arrays for the reduce sum operation.
      rtol: the relative error.
      atol: the absolute error.
    """

    for test_input in test_inputs:
      with self.test_session() as sess:
        with self.test_scope():
          a = array_ops.placeholder(dtype)
          index = array_ops.placeholder(dtypes.int32)
          out = math_ops.reduce_sum(a, index)
        result = sess.run(out, {
            a: np.array(test_input, dtype=dtype),
            index: [0]
        })
        # Compare the results using float32 type.
        self.assertAllClose(
            np.float32(result),
            np.float32(expected_result),
            rtol=rtol,
            atol=atol)
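A hypothetical invocation of the helper above, shown as a comment (values illustrative only): reducing [1.0, 2.0, 3.0] over axis 0 yields 6.0, the expected result.
# self._testReduceSum(6.0, np.float32, test_inputs=[[1.0, 2.0, 3.0]])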
 def _verifySolve(self, x, y, batch_dims=None):
   for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
     if np_type == np.float32 or np_type == np.complex64:
       tol = 1e-5
     else:
       tol = 1e-12
     for adjoint in False, True:
       if np_type in [np.float32, np.float64]:
         a = x.real.astype(np_type)
         b = y.real.astype(np_type)
       else:
         a = x.astype(np_type)
         b = y.astype(np_type)
       a_np = np.conj(np.transpose(a)) if adjoint else a
       if batch_dims is not None:
         a = np.tile(a, batch_dims + [1, 1])
         a_np = np.tile(a_np, batch_dims + [1, 1])
         b = np.tile(b, batch_dims + [1, 1])
       np_ans = np.linalg.solve(a_np, b)
       for use_placeholder in False, True:
         with self.test_session(use_gpu=True) as sess:
           if use_placeholder:
             a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
             b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
             tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
             out = sess.run(tf_ans, {a_ph: a, b_ph: b})
           else:
             tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
             out = tf_ans.eval()
             self.assertEqual(tf_ans.get_shape(), out.shape)
           self.assertEqual(np_ans.shape, out.shape)
           self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
Example #9
  def testCondNested(self):
    with context.graph_mode(), self.test_session():
      v = resource_variable_ops.ResourceVariable(1.0)
      variables.global_variables_initializer().run()
      p = array_ops.placeholder(dtype=dtypes.bool)
      q = array_ops.placeholder(dtype=dtypes.bool)
      with function.AutomaticControlDependencies() as c:

        def true_fn():
          v.assign(v + 1, name='true')
          return 1.0

        def false_fn():

          def inner_true_fn():
            v.assign(v * 2, name='false_true')
            return 2.0

          def inner_false_fn():
            v.assign(v * 3, name='false_false')
            return 3.0

          control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
          return 1.0

        control_flow_ops.cond(p, true_fn, false_fn)
        with ops.name_scope('final'):
          val = v.read_value()
        val = c.mark_as_return(val)
      self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
      self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
      self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
      self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
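A reading aid for the asserted values (not part of the test): the four eval calls share the same variable, so the assignments accumulate across calls.
#   start:               v = 1.0
#   p=False, q=False  ->  inner_false_fn: v = 1.0 * 3 = 3.0
#   p=False, q=True   ->  inner_true_fn:  v = 3.0 * 2 = 6.0
#   p=True,  q=True   ->  true_fn:        v = 6.0 + 1 = 7.0
#   p=True,  q=False  ->  true_fn:        v = 7.0 + 1 = 8.0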
Example #10
  def testCDFWithDynamicEventShapeKnownNdims(self):
    """Test that dynamically-sized events with unknown shape work."""
    batch_size = 2
    histograms = array_ops.placeholder(dtype=dtypes.float32,
                                       shape=(batch_size, None))
    event = array_ops.placeholder(dtype=dtypes.float32, shape=(batch_size,))
    dist = categorical.Categorical(probs=histograms)
    cdf_op = dist.cdf(event)

    # Feed values with different shapes into the placeholders.
    # Three classes.
    event_feed_one = [0, 1]
    histograms_feed_one = [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]]
    expected_cdf_one = [0.0, 1.0]
    feed_dict_one = {
        histograms: histograms_feed_one,
        event: event_feed_one
    }

    # six classes.
    event_feed_two = [2, 5]
    histograms_feed_two = [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],
                           [0.15, 0.2, 0.05, 0.35, 0.13, 0.12]]
    expected_cdf_two = [0.9, 0.88]
    feed_dict_two = {
        histograms: histograms_feed_two,
        event: event_feed_two
    }

    with self.cached_session() as sess:
      actual_cdf_one = sess.run(cdf_op, feed_dict=feed_dict_one)
      actual_cdf_two = sess.run(cdf_op, feed_dict=feed_dict_two)

    self.assertAllClose(actual_cdf_one, expected_cdf_one)
    self.assertAllClose(actual_cdf_two, expected_cdf_two)
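The hand-computed expectations follow from the Categorical CDF being the total probability of classes strictly below the event value. A quick numpy check of the second case (a sketch, not part of the test):
import numpy as np

hist = np.array([0.15, 0.2, 0.05, 0.35, 0.13, 0.12])
print(hist[:5].sum())  # 0.88, matching expected_cdf_two[1]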
Example #11
  def testShapeFunctionEdgeCases(self):
    # split_dim greater than rank of input.
    with self.assertRaises(ValueError):
      array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)

    # split dim less than -(rank of input)
    with self.assertRaises(ValueError):
      array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)

    # num_split does not evenly divide the size in split_dim.
    with self.assertRaisesRegexp(ValueError, "should evenly divide"):
      array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)

    # Unknown split_dim.
    splits = array_ops.split(
        value=[[0, 1, 2, 3]],
        num_or_size_splits=4,
        axis=array_ops.placeholder(dtypes.int32))
    for s in splits:
      self.assertEqual([None, None], s.get_shape().as_list())

    # Unknown split_dim and input shape.
    splits = array_ops.split(
        value=array_ops.placeholder(dtypes.float32),
        num_or_size_splits=4,
        axis=array_ops.placeholder(dtypes.int32))
    for s in splits:
      self.assertEqual(None, s.get_shape().ndims)
Example #12
  def _testStreamingQuantileBucketsHelper(
      self, inputs, num_quantiles=3, expected_buckets=None):
    """Helper to test quantile buckets on different inputs."""

    # set generate_quantiles to True since the test will generate fewer
    # boundaries otherwise.
    with self.test_session() as sess:
      accumulator = quantile_ops.QuantileAccumulator(
          init_stamp_token=0, num_quantiles=num_quantiles,
          epsilon=0.001, name="q1", generate_quantiles=True)
      resources.initialize_resources(resources.shared_resources()).run()
    input_column = array_ops.placeholder(dtypes.float32)
    weights = array_ops.placeholder(dtypes.float32)
    update = accumulator.add_summary(
        stamp_token=0,
        column=input_column,
        example_weights=weights)

    with self.test_session() as sess:
      sess.run(update,
               {input_column: inputs,
                weights: [1] * len(inputs)})

    with self.test_session() as sess:
      sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
      are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
      buckets, are_ready_flush = (sess.run(
          [buckets, are_ready_flush]))
      self.assertEqual(True, are_ready_flush)
      # By default, use 3 quantiles, 4 boundaries for simplicity.
      self.assertEqual(num_quantiles + 1, len(buckets))
      if expected_buckets:
        self.assertAllEqual(buckets, expected_buckets)
Example #13
  def testStreamingQuantileBuckets(self):
    """Sets up the quantile summary op test as follows.

    100 batches of data are added to the accumulator. The batches have the form:
    [0 1 .. 99]
    [100 101 .. 199]
    ...
    [9900 9901 .. 9999]
    Every example weight in every batch is 1.
    """
    with self.test_session() as sess:
      accumulator = quantile_ops.QuantileAccumulator(
          init_stamp_token=0, num_quantiles=3, epsilon=0.01, name="q1")
      resources.initialize_resources(resources.shared_resources()).run()
    weight_placeholder = array_ops.placeholder(dtypes.float32)
    dense_placeholder = array_ops.placeholder(dtypes.float32)
    update = accumulator.add_summary(
        stamp_token=0,
        column=dense_placeholder,
        example_weights=weight_placeholder)
    with self.test_session() as sess:
      for i in range(100):
        dense_float = np.linspace(
            i * 100, (i + 1) * 100 - 1, num=100).reshape(-1, 1)
        sess.run(update, {
            dense_placeholder: dense_float,
            weight_placeholder: np.ones(shape=(100, 1), dtype=np.float32)
        })

    with self.test_session() as sess:
      sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
      are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
      buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
      self.assertEqual(True, are_ready_flush)
      self.assertAllEqual([0, 3335., 6671., 9999.], buckets)
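The asserted boundaries are close to the 0/33/67/100 percentiles of the 10000 streamed values; the small offsets come from the epsilon=0.01 approximation. A rough numpy reference (a sketch, not part of the test):
import numpy as np

print(np.percentile(np.arange(10000), [0, 100. / 3, 200. / 3, 100]))
# -> [0., 3333., 6666., 9999.]; the accumulator reports [0, 3335, 6671, 9999]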
Example #14
  def testSkipEagerUnbatchDynamicShapeMismatch(self):
    ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
    ph2 = array_ops.placeholder(dtypes.int32, shape=None)
    data = dataset_ops.Dataset.from_tensors((ph1, ph2))
    data = data.apply(batching.unbatch())
    iterator = dataset_ops.make_initializable_iterator(data)
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      # Mismatch in the 0th dimension.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: np.arange(8).astype(np.int32)
          })
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(next_element)

      # No 0th dimension (i.e. scalar value) for one component.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: 7
          })
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(next_element)
Example #15
  def testSlideDatasetInvalid(self, count, window_size, window_shift,
                              window_stride):
    count_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (
        dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count_t).apply(
            sliding.sliding_window_batch(
                window_size=window_size_t,
                window_shift=window_shift_t,
                window_stride=window_stride_t)).make_initializable_iterator())
    init_op = iterator.initializer

    with self.cached_session() as sess:
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(
            init_op,
            feed_dict={
                count_t: count,
                window_size_t: window_size,
                window_shift_t: window_shift,
                window_stride_t: window_stride
            })
Example #16
  def _VerifyValues(self,
                    input_sizes=None,
                    filter_sizes=None,
                    strides=None,
                    dilations=None,
                    padding=None,
                    data_format_src="NHWC",
                    data_format_dst="NHWC",
                    expected=None):
    """Tests that tf.nn.conv2d produces the expected value.

    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      strides: Strides.
      dilations: RHS dilations.
      padding: Padding type.
      data_format_src: Data format input is in.
      data_format_dst: Data format in which verification will run and to which
        the input is converted.
      expected: Expected output.
    """

    total_size_1 = np.prod(input_sizes)
    total_size_2 = np.prod(filter_sizes)
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
    x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)
    strides = [1] + strides + [1]
    if dilations is None:
      dilations = [1, 1]
    dilations = [1] + dilations + [1]

    # Convert between data formats.
    expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
                                                    data_format_dst)
    x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
                                              data_format_dst)
    input_sizes = test_utils.PermuteDimsBetweenDataFormats(
        input_sizes, data_format_src, data_format_dst)
    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
                                                       data_format_dst)
    dilations = test_utils.PermuteDimsBetweenDataFormats(
        dilations, data_format_src, data_format_dst)

    with self.test_session() as sess:
      t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
      t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
      with self.test_scope():
        out = nn_ops.conv2d(
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format_dst,
            dilations=dilations)

      value = sess.run(out, {t1: x1, t2: x2})
      self.assertAllClose(expected, value, 1e-3)
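A hypothetical minimal invocation of the helper above, shown as a comment (values illustrative only, assuming the surrounding XLA Conv2D test class): the generated 1x1 input is [1, 2] and the generated filter is [1, 2], so the convolution output is 1*1 + 2*2 = 5.
# self._VerifyValues(
#     input_sizes=[1, 1, 1, 2],
#     filter_sizes=[1, 1, 2, 1],
#     strides=[1, 1],
#     padding="VALID",
#     expected=np.array([5.], dtype=np.float32).reshape([1, 1, 1, 1]))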
Example #17
  def testCond(self):
    """Tests that compilation handles switch operators."""

    with self.session(config=NoRewriteSessionConfig()) as session:
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.placeholder(dtypes.float32)
      c = array_ops.placeholder(dtypes.bool)
      with jit_scope():
        z = x + 1.0
        w = control_flow_ops.cond(c, lambda: z, lambda: y)
        t = math_ops.add(z, w)

      # If JIT compilation chooses to cluster z and t, then execution will
      # deadlock.

      run_metadata = config_pb2.RunMetadata()
      result = test_utils.RunWithWarmup(
          session,
          t, {
              x: np.float32(2),
              y: np.float32(4),
              c: True
          },
          run_metadata=run_metadata,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE))
      self.assert_(MetadataHasXlaRunOp(run_metadata))
      self.assertAllClose(result, np.float32(6), rtol=1e-1)
Example #18
  def testReshape(self):
    """Tests an operator with compile-time constant and non-constant inputs."""

    with self.session(config=NoRewriteSessionConfig()) as sess:
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.placeholder(dtypes.int32)
      with jit_scope():
        # Reshape's first argument is non-constant in the JIT, but its second
        # (shape) argument will be treated as a compile-time constant for
        # each JIT compilation.
        # We do not use a tf.const() argument since we want to ensure the
        # shape is still a run-time argument to the JIT, and not
        # statically known as part of the JIT compilation's input graph.
        z = array_ops.reshape(x, y)
      run_metadata = config_pb2.RunMetadata()
      out = test_utils.RunWithWarmup(
          sess,
          z, {
              x: np.array([1, 2, 3, 4, 5, 6], np.float32),
              y: [-1, 3]
          },
          run_metadata=run_metadata,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE))
      self.assert_(MetadataHasXlaRunOp(run_metadata))
      self.assertAllClose(np.array([[1, 2, 3], [4, 5, 6]], np.float32), out)
Example #19
  def testIgnoredArguments(self):
    """Tests that JIT computations can ignore formal parameters."""

    with self.session(config=NoRewriteSessionConfig()) as sess:
      x = array_ops.placeholder(dtypes.int32)
      y = array_ops.placeholder(dtypes.int32)
      with jit_scope():
        z = math_ops.add(x, x)
        w = math_ops.add(y, y)
        # Pulls 'w' into the same compilation via control dependencies.
        with ops.control_dependencies([w]):
          n = control_flow_ops.no_op()
        with ops.control_dependencies([n]):
          t = math_ops.add(z, z)

      run_metadata = config_pb2.RunMetadata()
      out = test_utils.RunWithWarmup(
          sess,
          t, {
              x: np.int32(7),
              y: np.int32(404)
          },
          run_metadata=run_metadata,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE))
      self.assert_(MetadataHasXlaRunOp(run_metadata))
      self.assertAllClose(28, out)
Example #20
    def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
        with self.test_session() as sess:
            m_neg = array_ops.placeholder(dtype=dtypes.float32)
            m_pos = array_ops.placeholder(dtype=dtypes.float32)
            s = array_ops.placeholder(dtype=dtypes.float32)

            neg = random_ops.random_normal([20], mean=m_neg, stddev=s, dtype=dtypes.float32)
            pos = random_ops.random_normal([20], mean=m_pos, stddev=s, dtype=dtypes.float32)

            data = array_ops.concat([neg, pos], 0)
            data = math_ops.cast(math_ops.round(data), tf_dtype)
            data = math_ops.minimum(math_ops.maximum(data, 0), 1)
            lab = array_ops.concat([array_ops.zeros([20], dtype=tf_dtype), array_ops.ones([20], dtype=tf_dtype)], 0)

            cm = confusion_matrix.confusion_matrix(lab, data, dtype=tf_dtype, num_classes=2)

            d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})

            truth = np.zeros([2, 2], dtype=np_dtype)
            try:
                range_builder = xrange
            except NameError:  # In Python 3.
                range_builder = range
            for i in range_builder(len(d)):
                truth[l[i], d[i]] += 1

            self.assertEqual(cm_out.dtype, np_dtype)
            self.assertAllClose(cm_out, truth, atol=1e-10)
  def testTensorArrayScatterReadAndGradients(self):
    with self.cached_session() as session, self.test_scope():
      id0 = array_ops.placeholder(dtypes.int32)
      id1 = array_ops.placeholder(dtypes.int32)

      def fn():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=10)

        indices = constant_op.constant([1, 8])
        value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])

        w = ta.scatter(indices, value)
        r0 = w.read(id0)
        r1 = w.read(id1)

        # Test combined gradients + aggregation of read(0).
        grad = gradients_impl.gradients(
            ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
        return [[r0, r1], grad]

      read_vals, grad_vals = session.run(
          xla.compile(fn), feed_dict={
              id0: 1,
              id1: 8
          })

      self.assertEqual(len(read_vals), 2)
      self.assertEqual(len(grad_vals), 1)
      self.assertAllEqual([1.0, -1.0], read_vals[0])
      self.assertAllEqual([10.0, -10.0], read_vals[1])
      self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
 def testFeedSparseTensor(self):
   with session.Session() as s:
     indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
     values = np.array([1.0, 2.0]).astype(np.float32)
     shape = np.array([7, 9, 2]).astype(np.int64)
     sp = ops.SparseTensor(
         array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
         array_ops.placeholder(dtype=np.float32, shape=(2,)),
         array_ops.placeholder(dtype=np.int64, shape=(3,)),)
     sp_indices = array_ops.identity(sp.indices)
     sp_values = array_ops.identity(sp.values)
     sp_shape = array_ops.identity(sp.shape)
     sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
     # Feed with tuple
     indices_out, values_out, shape_out = s.run(
         [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
     self.assertAllEqual(indices_out, indices)
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(shape_out, shape)
     # Feed with SparseTensorValue
     indices_out, values_out, shape_out = s.run(
         [sp_indices, sp_values, sp_shape],
         {sp: ops.SparseTensorValue(indices, values, shape)})
     self.assertAllEqual(indices_out, indices)
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(shape_out, shape)
     # Feed with SparseTensorValue, fetch SparseTensorValue
     sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
     self.assertAllEqual(sp2_out.indices, indices)
     self.assertAllEqual(sp2_out.values, values)
     self.assertAllEqual(sp2_out.shape, shape)
Example #23
 def _testIdentityOperator(self, use_static_shape_):
   for dtype in np.float32, np.float64:
     a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
     x_np = np.array([[2.], [-3.]], dtype=dtype)
     y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
     with self.test_session() as sess:
       if use_static_shape_:
         a = constant_op.constant(a_np, dtype=dtype)
         x = constant_op.constant(x_np, dtype=dtype)
         y = constant_op.constant(y_np, dtype=dtype)
       else:
         a = array_ops.placeholder(dtype)
         x = array_ops.placeholder(dtype)
         y = array_ops.placeholder(dtype)
       id_op = util.identity_operator(a)
       ax = id_op.apply(x)
       aty = id_op.apply_adjoint(y)
       op_shape = ops.convert_to_tensor(id_op.shape)
       if use_static_shape_:
         op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
       else:
         op_shape_val, ax_val, aty_val = sess.run(
             [op_shape, ax, aty], feed_dict={a: a_np,
                                             x: x_np,
                                             y: y_np})
     self.assertAllEqual(op_shape_val, [3, 2])
     self.assertAllClose(ax_val, x_np)
     self.assertAllClose(aty_val, y_np)
Example #24
  def testGradientsNegativeAxis(self):
    x1 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    x2 = [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]
    inp_tensors = [constant_op.constant(x1, shape=(2, 3), dtype=dtypes.float32),
                   constant_op.constant(x2, shape=(2, 3), dtype=dtypes.float32)]

    # Test concat gradient with axis == -2
    self._testGradientsForAxis(inp_tensors, -2, output_shape=[4, 3])

    # Test concat gradient with unknown-shape tensors.
    x1_placeholder = array_ops.placeholder(dtypes.float32)
    x2_placeholder = array_ops.placeholder(dtypes.float32)
    inp_tensors_placeholders = [x1_placeholder, x2_placeholder]
    feed_dict = {x1_placeholder: x1, x2_placeholder: x2}
    self._testGradientsForAxis(
        inp_tensors_placeholders, -1, output_shape=[2, 6], feed_dict=feed_dict)

    # Test IndexedSlices concat gradient.
    self._testIndexedSlicesGradientsForAxis(
        inp_tensors, -2, output_shape=[2, 3], gather_indexes=[2, 0])

    # We don't support calculating IndexedSlices concat gradient for
    # negative indexes when rank is not known.
    with self.assertRaises(ValueError):
      self._testIndexedSlicesGradientsForAxis(
          inp_tensors_placeholders, -2, output_shape=[2, 3],
          gather_indexes=[2, 0], feed_dict=feed_dict)
Example #25
  def testProbScalarBaseDistributionNonScalarTransformDynamic(self):
    # Scalar batch_shape.
    df = np.asarray(2., dtype=np.float32)
    # Non-scalar batch_shape.
    loc = np.asarray([[0., 0, 0],
                      [1, 2, 3],
                      [1, 0, 1]],
                     dtype=np.float32)
    scale_diag = np.asarray([[1., 2, 3],
                             [2, 3, 4],
                             [4, 5, 6]],
                            dtype=np.float32)
    scale_tril = np.concatenate([[np.diag(scale_diag[i])]
                                 for i in range(len(scale_diag))])
    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.

    expected_mst = _FakeVectorStudentT(
        df=np.tile(df, reps=len(scale_diag)),
        loc=loc,
        scale_tril=scale_tril)

    with self.cached_session():
      df_pl = array_ops.placeholder(dtypes.float32, name="df")
      loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
      scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
      feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
Example #26
  def testProbNonScalarBaseDistributionScalarTransformDynamic(self):
    # Non-scalar batch_shape.
    df = np.asarray([1., 2., 3.], dtype=np.float32)
    # Scalar batch_shape.
    loc = np.asarray([1, 2, 3], dtype=np.float32)
    scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
    scale_tril = np.diag(scale_diag)

    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.

    expected_mst = _FakeVectorStudentT(
        df=df,
        loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
        scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
                           reps=[len(df), 1, 1]))

    with self.cached_session():
      df_pl = array_ops.placeholder(dtypes.float32, name="df")
      loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
      scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
      feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
Example #27
  def testSparseCount(self):
    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0]]),
          values=(i * np.array([1])),
          dense_shape=np.array([1, 1]))

    def make_scan_fn(step):
      return lambda state, _: (_sparse(state.values[0] + step), state)

    start = array_ops.placeholder(dtypes.int32, shape=[])
    step = array_ops.placeholder(dtypes.int32, shape=[])
    take = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = self._counting_dataset(
        _sparse(start),
        make_scan_fn(step)).take(take).make_initializable_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:

      for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10),
                                            (10, 2, 10), (10, -1, 10),
                                            (10, -2, 10)]:
        sess.run(iterator.initializer,
                 feed_dict={start: start_val, step: step_val, take: take_val})
        for expected, _ in zip(
            itertools.count(start_val, step_val), range(take_val)):
          self.assertEqual(expected, sess.run(next_element).values[0])
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(next_element)
Example #28
 def testUnknownShape(self):
   x = array_ops.placeholder(dtypes.float32)
   num_dimensions = array_ops.placeholder(dtypes.int32)
   ret = random_grad.add_leading_unit_dimensions(x, num_dimensions)
   with self.cached_session() as sess:
     ret_val = sess.run(ret, {x: np.ones([2, 2]), num_dimensions: 2})
   self.assertAllEqual(ret_val.shape, [1, 1, 2, 2])
Example #29
  def testGradientInference(self, data_format):
    # TODO(b/64270657): Use gradient_checker here in addition to comparing with
    # this reference implementation.
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    grad_val = np.random.random_sample(x_shape).astype(np.float32)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    data_format_src = "NHWC"

    with self.cached_session() as sess, self.test_scope():
      grad_val_converted = test_utils.ConvertBetweenDataFormats(
          grad_val, data_format_src, data_format)
      x_val_converted = test_utils.ConvertBetweenDataFormats(
          x_val, data_format_src, data_format)

      grad = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="grad")
      x = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="x")
      mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
      var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      with self.test_scope():
        out = gen_nn_ops.fused_batch_norm_grad(
            grad,
            x,
            scale,
            mean,
            var,
            data_format=data_format,
            is_training=False)
        grad_x, grad_scale, grad_offset, _, _ = out

      ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
          grad, x, scale, mean, var, data_format=data_format, is_training=False)

      grad_x_val, grad_scale_val, grad_offset_val, = sess.run(
          [grad_x, grad_scale, grad_offset], {
              grad: grad_val_converted,
              x: x_val_converted,
              mean: mean_val,
              var: var_val,
              scale: scale_val
          })
      grad_x_ref, grad_scale_ref, grad_offset_ref, = sess.run(
          [ref_x, ref_scale, ref_offset], {
              grad: grad_val_converted,
              x: x_val_converted,
              mean: mean_val,
              var: var_val,
              scale: scale_val
          })

      self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
      self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
      self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
Example #30
  def simpleTest(self, arg0, arg1, global_jit_level):
    config = config_pb2.ConfigProto()
    config.graph_options.optimizer_options.global_jit_level = global_jit_level

    with session_lib.Session(config=config) as sess:
      a1 = array_ops.placeholder(dtypes.float32, [2, 2], name="a1")
      a2 = array_ops.placeholder(dtypes.float32, [2, 2], name="a2")
      # Two element-wise ops. We need at least two ops since single
      # element clusters are not passed to XLA in fusion_only mode.
      a3 = a1 * a2
      a4 = a3 + a1
      # A matmul to break XLA clustering.
      a5 = math_ops.matmul(a4, a1)
      # Two more element-wise ops.
      a6 = a5 - a4
      a7 = a6 + a2

      run_metadata = config_pb2.RunMetadata()
      output = test_utils.RunWithWarmup(
          sess,
          a7, {
              a1: arg0,
              a2: arg1
          },
          run_metadata=run_metadata,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE))

      labels = RunMetadataLabels(run_metadata)

      xla_compile_count = sum("XlaCompile(" in x for x in labels)
      xla_run_count = sum("XlaRun(" in x for x in labels)
      self.assertEqual(xla_compile_count, xla_run_count)

      return output, xla_run_count
Example #31
 def testGraphMode(self):
   graph = ops.Graph()
   with graph.as_default(), context.graph_mode():
     array_ops.placeholder(dtypes.int32)
   self.assertEqual(1, len(graph.get_operations()))
Example #32
 def testWrongShapeForReductionIndices(self):
     reduction_axes = [[1], [2]]
     c_unknown = array_ops.placeholder(dtypes.float32)
     with self.assertRaisesWithPredicateMatch(ValueError,
                                              ".*must be at most rank 1.*"):
         math_ops.reduce_sum(c_unknown, reduction_axes)
Example #33
 def get_placeholder(self):
   if self.is_sparse:
     return array_ops.sparse_placeholder(dtype=self.dtype)
   return array_ops.placeholder(dtype=self.dtype,
                                shape=[None] + list(self.shape[1:]))
Example #34
  def _VerifyOneTest(self,
                     pool_func,
                     pool_grad_func,
                     input_sizes,
                     ksize,
                     strides,
                     padding,
                     data_format,
                     pool_grad_grad_func=None):
    """Verifies the output values of the pooling gradient function.

    Args:
      pool_func: Forward pooling function
      pool_grad_func: Pooling gradient function corresponding to pool_func
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      pool_grad_grad_func: Second-order gradient function, if available.
    """
    total_size = np.prod(input_sizes)
    # TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
    # maximal at 16 bits. Switch to np.random.randn when resolved.
    x = np.arange(1, total_size + 1, dtype=np.float32)
    x *= (np.random.randint(2, size=total_size) * 2 - 1)  # Flip signs randomly
    # Verify some specifically interesting values...
    x[np.random.choice(total_size)] = np.inf
    x[np.random.choice(total_size)] = -np.inf
    # TODO(b/74222344): Fix nan handling for max pool grad.
    # x[np.random.choice(total_size)] = np.nan
    x = x.reshape(input_sizes)
    with self.test_session() as sess:
      # Use the forward pool function to compute some corresponding outputs
      # (needed for the CPU device, and we need the shape in both cases).
      with ops.device(self.CPU_DEVICE):
        inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
        outputs = pool_func(
            inputs,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")

      output_vals = np.array(sess.run(outputs, {inputs: x}))
      output_gradient_vals = np.arange(
          1, output_vals.size + 1, dtype=np.float32)
      output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
      output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
      output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)

      # Use the TensorFlow CPU pooling gradient to compute the expected input
      # gradients.
      with ops.device(self.CPU_DEVICE):
        output_gradients = array_ops.placeholder(
            dtypes.float32, shape=output_vals.shape)
        expected_input_gradients = pool_grad_func(
            inputs,
            outputs,
            output_gradients,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")
        expected_input_gradient_vals = sess.run(
            expected_input_gradients,
            {inputs: x,
             output_gradients: output_gradient_vals})

        output_grad_gradients = array_ops.placeholder(
            dtypes.float32, shape=expected_input_gradient_vals.shape)
        if pool_grad_grad_func is not None:
          expected_grad_gradients = pool_grad_grad_func(
              inputs,
              outputs,
              output_grad_gradients,
              ksize=ksize,
              strides=strides,
              padding=padding,
              data_format="NHWC")
          expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
              inputs: x,
              output_grad_gradients: output_grad_grad_vals
          })

      # Run the gradient op on the XLA device
      with self.test_scope():
        outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
        xla_inputs = inputs
        xla_outputs = outputs
        xla_output_gradients = output_gradients
        xla_output_grad_gradients = output_grad_gradients
        xla_ksize = ksize
        xla_strides = strides
        if data_format == "NCHW":
          xla_inputs = NHWCToNCHW(inputs)
          xla_outputs = NHWCToNCHW(outputs)
          xla_output_gradients = NHWCToNCHW(output_gradients)
          xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
          xla_ksize = NHWCToNCHW(ksize)
          xla_strides = NHWCToNCHW(strides)
        actual_input_gradients = pool_grad_func(
            xla_inputs,
            xla_outputs,
            xla_output_gradients,
            ksize=xla_ksize,
            strides=xla_strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          actual_input_gradients = NCHWToNHWC(actual_input_gradients)
        if pool_grad_grad_func is not None:
          actual_grad_gradients = pool_grad_grad_func(
              xla_inputs,
              xla_outputs,
              xla_output_grad_gradients,
              ksize=xla_ksize,
              strides=xla_strides,
              padding=padding,
              data_format=data_format)
          if data_format == "NCHW":
            actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
      actual_input_gradients_vals = sess.run(actual_input_gradients, {
          inputs: x,
          outputs: output_vals,
          output_gradients: output_gradient_vals
      })
      # Compare the TensorFlow and XLA results.
      self.assertAllClose(
          expected_input_gradient_vals,
          actual_input_gradients_vals,
          rtol=1e-4,
          atol=1e-6)
      self.assertShapeEqual(actual_input_gradients_vals, inputs)

      if pool_grad_grad_func is not None:
        actual_grad_gradients_vals = sess.run(
            actual_grad_gradients, {
                inputs: x,
                outputs: output_vals,
                output_grad_gradients: output_grad_grad_vals
            })

        # Compare the TensorFlow and XLA results.
        self.assertAllClose(
            expected_grad_gradients_vals,
            actual_grad_gradients_vals,
            rtol=1e-4,
            atol=1e-6)
        self.assertShapeEqual(actual_grad_gradients_vals, outputs)
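A hypothetical invocation of the helper above, shown as a comment (values illustrative only; nn_ops.max_pool and gen_nn_ops.max_pool_grad are assumed to be imported as elsewhere in this test):
# self._VerifyOneTest(
#     pool_func=nn_ops.max_pool,
#     pool_grad_func=gen_nn_ops.max_pool_grad,
#     input_sizes=[1, 3, 3, 1],
#     ksize=[1, 2, 2, 1],
#     strides=[1, 1, 1, 1],
#     padding="VALID",
#     data_format="NHWC")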
Example #35
def _export_estimator(estimator,
                      export_dir,
                      signature_fn,
                      input_fn,
                      default_batch_size,
                      exports_to_keep,
                      input_feature_key=None,
                      use_deprecated_input_fn=True,
                      prediction_key=None):
    if use_deprecated_input_fn:
        input_fn = input_fn or _default_input_fn
    elif input_fn is None:
        raise ValueError('input_fn must be defined.')

    checkpoint_path = tf_saver.latest_checkpoint(estimator._model_dir)
    with ops.Graph().as_default() as g:
        contrib_variables.create_global_step(g)

        if use_deprecated_input_fn:
            examples = array_ops.placeholder(dtype=dtypes.string,
                                             shape=[default_batch_size],
                                             name='input_example_tensor')
            features = input_fn(estimator, examples)
        else:
            features, _ = input_fn()
            examples = None
            if input_feature_key is not None:
                examples = features.pop(input_feature_key)

        if not features and not examples:
            raise ValueError('Either features or examples must be defined.')

        predictions = estimator._get_predict_ops(features)
        if prediction_key is not None:
            predictions = predictions[prediction_key]

        # Explicit signature_fn takes priority
        if signature_fn:
            default_signature, named_graph_signatures = signature_fn(
                examples, features, predictions)
        else:
            try:
                # Some estimators provide a signature function.
                # TODO(zakaria): check if the estimator has this function,
                #   raise helpful error if not
                signature_fn = estimator._create_signature_fn()

                default_signature, named_graph_signatures = (signature_fn(
                    examples, features, predictions))
            except AttributeError:
                logging.warn(
                    'Change warning: `signature_fn` will be required after'
                    '2016-08-01.\n'
                    'Using generic signatures for now.  To maintain this behavior, '
                    'pass:\n'
                    '  signature_fn=export.generic_signature_fn\n'
                    'Also consider passing a regression or classification signature; '
                    'see cl/126430915 for an example.')
                default_signature, named_graph_signatures = generic_signature_fn(
                    examples, features, predictions)
        if exports_to_keep is not None:
            exports_to_keep = gc.largest_export_versions(exports_to_keep)
        return _export_graph(g,
                             _get_saver(),
                             checkpoint_path,
                             export_dir,
                             default_graph_signature=default_signature,
                             named_graph_signatures=named_graph_signatures,
                             exports_to_keep=exports_to_keep)
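A hypothetical call of the exporter above, shown as a comment (my_estimator and the export directory are assumptions; with use_deprecated_input_fn left at its default, input_fn may be None):
# _export_estimator(estimator=my_estimator,
#                   export_dir='/tmp/my_model_export',
#                   signature_fn=None,
#                   input_fn=None,
#                   default_batch_size=1,
#                   exports_to_keep=5)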
Example #36
    def testSlice(self):
        tf_val = array_ops.placeholder(dtypes.int32, shape=(4, ))[0:2]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([None, None], c_val.as_list())

        # begin:end
        tf_val = constant_op.constant([10, 20, 30])[1:3]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([20, 30], c_val.as_list())

        # begin:end:stride
        tf_val = array_ops.strided_slice(constant_op.constant([10, 20, 30]),
                                         [1], [3],
                                         strides=[2])
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([20], c_val.as_list())

        # [1, 2, 16, 37, None, 48]
        tf_val_orig = array_ops.concat(
            [[1, 2, 16, 37],
             array_ops.placeholder(dtypes.int32, shape=(1, )), [48]], 0)

        # begin: no end
        tf_val = tf_val_orig[2:]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None, 48], c_val.as_list())

        # begin::negative slice
        tf_val = tf_val_orig[2::-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 2, 1], c_val.as_list())

        # :end:negative slice
        tf_val = tf_val_orig[:1:-2]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([48, 37], c_val.as_list())

        # begin:end:negative slice
        tf_val = tf_val_orig[3:1:-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16], c_val.as_list())

        # begin:negative end:slice
        tf_val = tf_val_orig[1:-3:1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([2, 16], c_val.as_list())

        # negative begin::slice
        tf_val = tf_val_orig[-3::1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, None, 48], c_val.as_list())

        # negative begin::negative slice
        tf_val = tf_val_orig[-3::-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16, 2, 1], c_val.as_list())

        # negative begin:negative end:negative slice
        tf_val = tf_val_orig[-3:-5:-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16], c_val.as_list())

        # Do not support shape inference for additional arguments
        tf_val = constant_op.constant([10, 20, 30])[...]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([None, None, None], c_val.as_list())

        # Do not support shape inference for tensor slices.
        tf_val = constant_op.constant(
            [10, 20, 30])[array_ops.placeholder(dtypes.int32, shape=()):]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual(tensor_shape.unknown_shape(), c_val)

        # Do not support shape inference for higher rank
        with self.assertRaises(ValueError):
            tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
            c_val = tensor_util.constant_value_as_shape(tf_val)
Example #37
 def dynamic_run(fun, x_value):
     x_value = np.array(x_value)
     x = array_ops.placeholder(dtypes.float32, name="x")
     return sess.run(fun(x), feed_dict={x: x_value})
Example #38
    def testPadShapeInference(self):
        a = array_ops.placeholder(np.float32, shape=(2, 3))

        c = xla.pad(a,
                    padding_value=7,
                    padding_low=[2, 1],
                    padding_high=[1, 2],
                    padding_interior=[1, 4])

        self.assertEqual(c.shape, tensor_shape.TensorShape([6, 14]))

        c = xla.pad(a,
                    padding_value=7,
                    padding_low=[2, -2],
                    padding_high=[1, -2],
                    padding_interior=[1, 2])

        self.assertEqual(c.shape, tensor_shape.TensorShape([6, 3]))

        c = xla.pad(array_ops.placeholder(np.float32, shape=(None, 2)),
                    padding_value=7,
                    padding_low=[0, 1],
                    padding_high=[0, 2],
                    padding_interior=[0, 4])
        self.assertEqual(c.shape.as_list(), [None, 9])

        # 0-sized input dimension and interior padding
        c = xla.pad(array_ops.placeholder(np.float32, shape=(2, 0)),
                    padding_value=7,
                    padding_low=[2, 1],
                    padding_high=[1, 1],
                    padding_interior=[1, 2])

        self.assertEqual(c.shape, tensor_shape.TensorShape([6, 2]))

        with self.assertRaisesRegex(
                ValueError,
                'padding_value input must be scalar, found rank 1 '):
            xla.pad(a,
                    padding_value=[0, 1],
                    padding_low=[0, 0],
                    padding_high=[0, 0],
                    padding_interior=[0, 0])

        with self.assertRaisesRegex(
                ValueError, 'padding_low must be a 1D tensor of size 2 '):
            xla.pad(a,
                    padding_value=7,
                    padding_low=[0, 0, 0],
                    padding_high=[0, 0],
                    padding_interior=[0, 0])

        with self.assertRaisesRegex(
                ValueError, 'padding_high must be a 1D tensor of size 2 '):
            xla.pad(a,
                    padding_value=7,
                    padding_low=[0, 0],
                    padding_high=[0, 0, 0],
                    padding_interior=[0, 0])

        with self.assertRaisesRegex(
                ValueError, 'padding_interior must be a 1D tensor of size 2 '):
            xla.pad(a,
                    padding_value=7,
                    padding_low=[0, 0],
                    padding_high=[0, 0],
                    padding_interior=[0])

        with self.assertRaisesRegex(
                ValueError,
                'padding_interior must contain only non-negative values, found -2 '
        ):
            xla.pad(a,
                    padding_value=7,
                    padding_low=[0, 0],
                    padding_high=[0, 0],
                    padding_interior=[-2, 0])

        with self.assertRaisesRegex(
                ValueError,
                'resulting padded dimension has negative size -1 '):
            xla.pad(a,
                    padding_value=7,
                    padding_low=[-3, 0],
                    padding_high=[0, 0],
                    padding_interior=[0, 0])
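The expected shapes above follow from the XLA pad size rule (a reading aid, not part of the test): for each dimension,
#   output_dim = padding_low + padding_high + input_dim
#                + max(input_dim - 1, 0) * padding_interior
# e.g. the first case: dim 0 -> 2 + 1 + 2 + (2 - 1) * 1 = 6
#                      dim 1 -> 1 + 2 + 3 + (3 - 1) * 4 = 14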
 def test_condition_tensor_multiple_shapes(self):
   for tensor_shape in [(4, 1), (4, 2), (4, 2, 6), (None, 5, 3)]:
     for conditioning_shape in [(4, 1), (4, 8), (4, 5, 3)]:
       conditioning_utils.condition_tensor(
           array_ops.placeholder(dtypes.float32, tensor_shape),
           array_ops.placeholder(dtypes.float32, conditioning_shape))
Example #40
    def _VerifyValues(self,
                      tensor_in_sizes,
                      filter_in_sizes,
                      stride,
                      padding,
                      data_type,
                      data_format="NHWC"):
        """Verifies the output values of the convolution function.

        Args:
          tensor_in_sizes: Input tensor dimensions in
            [batch, input_rows, input_cols, input_depth].
          filter_in_sizes: Filter tensor dimensions in
            [filter_rows, filter_cols, input_depth, depth_multiplier].
          stride: Stride.
          padding: Padding type.
          data_type: The data type to use.
          data_format: The data_format of the input. "NHWC" or "NCHW".
        """
        total_size_1 = 1
        total_size_2 = 1
        for s in tensor_in_sizes:
            total_size_1 *= s
        for s in filter_in_sizes:
            total_size_2 *= s
        # Initializes the input and filter tensor with numbers incrementing from 1.
        x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
                      dtype=data_type).reshape(tensor_in_sizes)
        x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
                      dtype=data_type).reshape(filter_in_sizes)
        with self.cached_session() as sess:
            if data_type == np.float32:
                tolerance = 1e-4
            else:
                self.assertEqual(data_type, np.float64)
                tolerance = 1e-8

            t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
            t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)

            native_t1 = t1
            strides = [1, stride, stride, 1]
            if data_format == "NCHW":
                # Transpose from NHWC input to NCHW.
                # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
                native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
                strides = [1, 1, stride, stride]

            with self.test_scope():
                conv_native = nn_ops.depthwise_conv2d_native(
                    native_t1,
                    t2,
                    strides=strides,
                    data_format=data_format,
                    padding=padding)

            if data_format == "NCHW":
                # Transpose back from NCHW to NHWC
                conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])

            with ops.device("CPU"):
                conv_interface = ReferenceDepthwiseConv2D(
                    t1, t2, strides=[1, stride, stride, 1], padding=padding)

            native_result = sess.run(conv_native, {t1: x1, t2: x2})
            interface_result = sess.run(conv_interface, {t1: x1, t2: x2})

        print("data_type:", data_type, "max diff = ",
              np.amax(np.absolute(native_result - interface_result)))
        self.assertAllClose(np.ravel(native_result),
                            np.ravel(interface_result),
                            rtol=tolerance)
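For reference, a hypothetical call to the helper above (the sizes are illustrative and not taken from the original suite):

    def testDepthwiseConv2DIllustrative(self):
        # Hypothetical case: a 1x4x4x2 input, a 2x2 filter with depth_multiplier 2,
        # stride 1, SAME padding, float32 on the default NHWC path.
        self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
                           filter_in_sizes=[2, 2, 2, 2],
                           stride=1,
                           padding="SAME",
                           data_type=np.float32)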
Example #41
def tfassert_eq(_):
    x = array_ops.placeholder(dtypes.int32, name='x_hold')
    y = array_ops.placeholder(dtypes.int32, name='y_hold')
    control_flow_ops.Assert(math_ops.equal(x, y), ['Expected x == y.'],
                            name='assert_eq')
    math_ops.add(x, math_ops.negative(y), name='x_y_diff')

  def test_condition_tensor_from_onehot(self):
    conditioning_utils.condition_tensor_from_onehot(
        array_ops.placeholder(dtypes.float32, (5, 4, 1)),
        array_ops.placeholder(dtypes.float32, (5, 10)))
Example #43
def tfmatmulandadd(_):
    # This tests multiple outputs.
    x = array_ops.placeholder(dtypes.float32, name='x_hold')
    y = array_ops.placeholder(dtypes.float32, name='y_hold')
    math_ops.matmul(x, y, name='x_y_prod')
    math_ops.add(x, y, name='x_y_sum')
Example #44
def tfcond(_):
    p = array_ops.placeholder(dtypes.bool, name='p_hold')
    x = array_ops.placeholder(dtypes.int32, name='x_hold')
    y = array_ops.placeholder(dtypes.int32, name='y_hold')
    z = control_flow_ops.cond(p, lambda: x, lambda: y)
    array_ops.identity(z, name='result')
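A quick sketch of how this graph could be exercised, assuming the builder is meant to populate the default graph and be evaluated in a Session (imports follow the conventions used elsewhere in these examples):

from tensorflow.python.client import session
from tensorflow.python.framework import ops

g = ops.Graph()
with g.as_default():
    tfcond(None)  # the unused argument matches the builder's signature
    with session.Session() as sess:
        # Feeding p_hold True selects the x branch, so the result echoes x_hold.
        out = sess.run('result:0', {'p_hold:0': True, 'x_hold:0': 3, 'y_hold:0': 7})
        assert out == 3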
Example #45
def tfgather(_):
    params = array_ops.placeholder(dtypes.float32, name='params')
    indices = array_ops.placeholder(dtypes.int32, name='indices')
    array_ops.gather(params, indices, name='gather_output')
Example #46
def tftop_k(_):
    x = array_ops.placeholder(dtypes.int32, shape=[5], name='x')
    # top_k returns a (values, indices) pair; the values op carries the 'values'
    # name, and the indices are re-exported under their own name.
    output = nn_ops.top_k(x, 2, name='values')
    array_ops.identity(output[1], name='indices')
    def testKafkaDataset(self):
        topics = array_ops.placeholder(dtypes.string, shape=[None])
        num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
        batch_size = array_ops.placeholder(dtypes.int64, shape=[])

        repeat_dataset = kafka_dataset_ops.KafkaDataset(
            servers="stream-mnist-a:9092",
            topics=topics,
            group="test",
            eof=True).repeat(num_epochs)
        batch_dataset = repeat_dataset.batch(batch_size)

        iterator = iterator_ops.Iterator.from_structure(
            batch_dataset.output_types)
        init_op = iterator.make_initializer(repeat_dataset)
        init_batch_op = iterator.make_initializer(batch_dataset)
        get_next = iterator.get_next()

        with self.test_session() as sess:
            # Basic test: read the first five messages (offsets 0-4).
            sess.run(init_op,
                     feed_dict={
                         topics: ["test:0:0:4"],
                         num_epochs: 1
                     })
            for i in range(5):
                self.assertEqual(
                    str("D" + str(i)).encode('utf-8'), sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Basic test: read the remaining messages (offset 5 to the end).
            sess.run(init_op,
                     feed_dict={
                         topics: ["test:0:5:-1"],
                         num_epochs: 1
                     })
            for i in range(5):
                self.assertEqual(
                    str("D" + str(i + 5)).encode('utf-8'), sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Basic test: read from both offset ranges.
            sess.run(init_op,
                     feed_dict={
                         topics: ["test:0:0:4", "test:0:5:-1"],
                         num_epochs: 1
                     })
            for j in range(2):
                for i in range(5):
                    self.assertEqual(
                        str("D" + str(i + j * 5)).encode('utf-8'),
                        sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Test repeated iteration through both offset ranges.
            sess.run(init_op,
                     feed_dict={
                         topics: ["test:0:0:4", "test:0:5:-1"],
                         num_epochs: 10
                     })
            for _ in range(10):
                for j in range(2):
                    for i in range(5):
                        self.assertEqual(
                            str("D" + str(i + j * 5)).encode('utf-8'),
                            sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Test batched and repeated iteration through both offset ranges.
            sess.run(init_batch_op,
                     feed_dict={
                         topics: ["test:0:0:4", "test:0:5:-1"],
                         num_epochs: 10,
                         batch_size: 5
                     })
            for _ in range(10):
                self.assertAllEqual(
                    [str("D" + str(i)).encode('utf-8') for i in range(5)],
                    sess.run(get_next))
                self.assertAllEqual(
                    [str("D" + str(i + 5)).encode('utf-8') for i in range(5)],
                    sess.run(get_next))
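The feed strings above appear to follow a "topic:partition:start_offset:end_offset" convention, with -1 meaning "read to the end of the log"; that reading is inferred from the expected D0-D9 messages, not from documented API. A small helper built on that assumption:

def kafka_subscription(topic, partition, start, end=-1):
    # Hypothetical helper; format inferred from the feeds used in the test above.
    return "%s:%d:%d:%d" % (topic, partition, start, end)

assert kafka_subscription("test", 0, 0, 4) == "test:0:0:4"
assert kafka_subscription("test", 0, 5) == "test:0:5:-1"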
Example #48
def tfmatmul(_):
    x = array_ops.placeholder(dtypes.float32, name='x_hold')
    y = array_ops.placeholder(dtypes.float32, name='y_hold')
    math_ops.matmul(x, y, name='x_y_prod')
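These tf* builders (tfmatmul, tfcond, tfgather, ...) only add named ops to the default graph. A minimal sketch of how such a builder could be serialized, assuming it is meant to produce a GraphDef for later use:

from tensorflow.python.framework import ops

g = ops.Graph()
with g.as_default():
    tfmatmul(None)  # the unused argument matches the builders' signature
# The graph now holds the named ops and can be serialized.
node_names = [node.name for node in g.as_graph_def().node]
# Expect the named placeholders and product op, e.g. ['x_hold', 'y_hold', 'x_y_prod'].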