Example No. 1
  def testComplex(self):
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
        complex("-INF")
    ]
    complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]

    with self.test_session():
      for dtype in (dtypes.complex64,):
        input_ = array_ops.placeholder(dtype)

        def clean_nans(s_l):
          return [s.decode("ascii").replace("-nan", "nan") for s in s_l]

        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])

      with self.assertRaisesOpError("Cannot select both"):
        output = string_ops.as_string(input_, scientific=True, shortest=True)
        output.eval(feed_dict={input_: complex_inputs_})
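A hedged eager-mode sketch of the same behavior through the public API (assuming TensorFlow 2.x and tf.strings.as_string; values are illustrative): complex inputs render as "(real,imag)" pairs, and shortest=True corresponds to the "%g" pattern checked above.

import tensorflow as tf

x = tf.constant([0.5 + 1.5j, 1.0 + 2.0j], dtype=tf.complex64)
# shortest=True picks the shortest representation, like printf "%g".
print(tf.strings.as_string(x, shortest=True).numpy())
# Expected, per the "%g" assertion above: [b'(0.5,1.5)' b'(1,2)']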
Example No. 2
  def testFloat(self):
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
    ]

    with self.test_session():
      for dtype in (dtypes.float32, dtypes.float64):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        s = lambda strs: [x.decode("ascii") for x in strs]
        self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])

        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])

        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])

        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])

      with self.assertRaisesOpError("Cannot select both"):
        output = string_ops.as_string(input_, scientific=True, shortest=True)
        output.eval(feed_dict={input_: float_inputs_})

      with self.assertRaisesOpError("Fill string must be one or fewer"):
        output = string_ops.as_string(input_, fill="ab")
        output.eval(feed_dict={input_: float_inputs_})
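A short eager sketch of the float formatting flags exercised above (assuming TF 2.x; outputs follow the printf patterns the test asserts):

import tensorflow as tf

x = tf.constant([0.5, 0.125], dtype=tf.float32)
print(tf.strings.as_string(x, width=3, fill="0").numpy())   # like "%03f": [b'0.500000' b'0.125000']
print(tf.strings.as_string(x, scientific=True).numpy())     # like "%e"
# Passing scientific=True together with shortest=True raises the
# "Cannot select both" error checked at the end of the test.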
Example No. 3
  def testDistribution(self, initial_known):
    classes = np.random.randint(5, size=(20000,))  # Uniformly sampled
    target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
    initial_dist = [0.2] * 5 if initial_known else None
    classes = math_ops.to_int64(classes)  # needed for Windows build.
    dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
        200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()

    get_next = dataset.apply(
        resampling.rejection_resample(
            target_dist=target_dist,
            initial_dist=initial_dist,
            class_func=lambda c, _: c,
            seed=27)).make_one_shot_iterator().get_next()

    with self.cached_session() as sess:
      returned = []
      while len(returned) < 4000:
        returned.append(sess.run(get_next))

    returned_classes, returned_classes_and_data = zip(*returned)
    _, returned_data = zip(*returned_classes_and_data)
    self.assertAllEqual([compat.as_bytes(str(c))
                         for c in returned_classes], returned_data)
    total_returned = len(returned_classes)
    class_counts = np.array([
        len([True for v in returned_classes if v == c])
        for c in range(5)])
    returned_dist = class_counts / total_returned
    self.assertAllClose(target_dist, returned_dist, atol=1e-2)
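The final assertion amounts to comparing a normalized class histogram against target_dist; a standalone numpy sketch of that check (hypothetical data):

import numpy as np

returned_classes = np.random.randint(5, size=(4000,))    # stand-in for the sampled class ids
class_counts = np.bincount(returned_classes, minlength=5)
returned_dist = class_counts / len(returned_classes)     # compared to target_dist with atol=1e-2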
Example No. 4
  def _transform_feature(self, inputs):
    input_tensor = inputs.get(self.key)
    if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError('SparseColumn input must be a SparseTensor.')

    if (input_tensor.dtype != dtypes.string and
        not input_tensor.dtype.is_integer):
      raise ValueError('input tensors dtype must be string or integer. '
                       'dtype: {}, column_name: {}'.format(
                           input_tensor.dtype, self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    if self.dtype == dtypes.string:
      sparse_values = input_tensor.values
    else:
      sparse_values = string_ops.as_string(input_tensor.values)

    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        sparse_values, self.hash_bucket_size, name='lookup')
    return sparse_tensor_lib.SparseTensor(
        input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
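A minimal sketch of the non-string branch above (assuming TF 2.x and the public tf.strings API; values are hypothetical): integer values are stringified before being hashed into buckets.

import tensorflow as tf

values = tf.constant([3, 7, 42], dtype=tf.int64)
sparse_values = tf.strings.as_string(values)                                # [b'3', b'7', b'42']
bucket_ids = tf.strings.to_hash_bucket_fast(sparse_values, num_buckets=100)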
Example No. 5
 def testStateSaverScopeNames(self):
   batch_size = constant_op.constant(2)
   sqss_scope_name = "unique_scope_name_for_sqss"
   num_unroll = 2
   length = 3
   key = string_ops.string_join([
       "key_", string_ops.as_string(
           math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
   ])
   padded_length = 4
   sequences = {
       "seq1": np.random.rand(padded_length, 5),
       "seq2": np.random.rand(padded_length, 4, 2)
   }
   context = {"context1": [3, 4]}
   initial_states = {
       "state1": np.random.rand(6, 7),
       "state2": np.random.rand(8)
   }
   state_saver = sqss.SequenceQueueingStateSaver(
       batch_size=batch_size,
       num_unroll=num_unroll,
       input_length=length,
       input_key=key,
       input_sequences=sequences,
       input_context=context,
       initial_states=initial_states,
       name=sqss_scope_name)
   prefetch_op = state_saver.prefetch_op
   next_batch = state_saver.next_batch
   self.assertTrue(
       state_saver.barrier.barrier_ref.name.startswith("%s/" %
                                                       sqss_scope_name))
   self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
   self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
Example No. 6
 def input_fn():
   start = random_ops.random_uniform(
       (), minval=0, maxval=sequence_length, dtype=dtypes.int32, seed=seed)
   # Concatenate lyrics_list so inputs and labels wrap when start > 0.
   lyrics_list_concat = lyrics_list + lyrics_list
   inputs_dense = array_ops.slice(lyrics_list_concat, [start],
                                  [sequence_length])
   indices = array_ops.constant(
       [[i, 0] for i in range(sequence_length)], dtype=dtypes.int64)
   dense_shape = [sequence_length, 1]
   inputs = sparse_tensor.SparseTensor(
       indices=indices, values=inputs_dense, dense_shape=dense_shape)
   table = lookup.string_to_index_table_from_tensor(
       mapping=list(vocab), default_value=-1, name='lookup')
   labels = table.lookup(
       array_ops.slice(lyrics_list_concat, [start + 1], [sequence_length]))
   input_key = string_ops.string_join([
       'key_', string_ops.as_string(
           random_ops.random_uniform(
               (),
               minval=0,
               maxval=10000000,
               dtype=dtypes.int32,
               seed=seed))
   ])
   return {'lyrics': inputs, input_key_column_name: input_key}, labels
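The "key_<random int>" pattern used here (and in several other examples) boils down to one join; a hedged TF 2.x sketch:

import tensorflow as tf

rand = tf.random.uniform((), minval=0, maxval=10000000, dtype=tf.int32)
input_key = tf.strings.join(["key_", tf.strings.as_string(rand)])  # e.g. b'key_1234567'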
Example No. 7
  def _testDistribution(self, initial_known):
    classes = np.random.randint(5, size=(20000,))  # Uniformly sampled
    target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
    initial_dist = [0.2] * 5 if initial_known else None
    iterator = (dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
        200, seed=21).map(lambda c: (c, string_ops.as_string(c))).apply(
            resampling.rejection_resample(
                target_dist=target_dist,
                initial_dist=initial_dist,
                class_func=lambda c, _: c,
                seed=27)).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      returned = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          returned.append(sess.run(get_next))

    returned_classes, returned_classes_and_data = zip(*returned)
    _, returned_data = zip(*returned_classes_and_data)
    self.assertAllEqual([compat.as_bytes(str(c))
                         for c in returned_classes], returned_data)
    total_returned = len(returned_classes)
    # Subsampling rejects a large percentage of the initial data in
    # this case.
    self.assertGreater(total_returned, 20000 * 0.2)
    class_counts = np.array([
        len([True for v in returned_classes if v == c])
        for c in range(5)])
    returned_dist = class_counts / total_returned
    self.assertAllClose(target_dist, returned_dist, atol=1e-2)
Example No. 8
  def testLargeInt(self):
    # Test the extreme values representable by int32 and int64.
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = array_ops.placeholder(dtypes.int32)
      int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

      input_ = array_ops.placeholder(dtypes.int64)
      int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
Example No. 9
  def testHalfInt(self):
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = array_ops.placeholder(dtypes.int16)
      int_inputs_ = [np.iinfo(np.int16).min, np.iinfo(np.int16).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
Example No. 10
 def setUp(self):
   super(BatchSequencesWithStatesTest, self).setUp()
   self.value_length = 4
   ind1 = np.array([
       [0, 0],
       [1, 0], [1, 3], [1, 4],
       [3, 2], [3, 3]])
   val1 = np.array([0, 10, 13, 14, 32, 33])
   shape1 = np.array([self.value_length, 6])
   sp_tensor1 = sparse_tensor.SparseTensor(
       array_ops.constant(ind1, dtypes.int64),
       array_ops.constant(val1, dtypes.int64),
       array_ops.constant(shape1, dtypes.int64))
   ind2 = np.array([
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 2],
       [1, 0, 3],
       [1, 1, 0],
       [1, 1, 1],
       [1, 1, 2],
       [1, 2, 2]])
   val2 = np.array([1, 10, 12, 103, 150, 149, 150, 122])
   shape2 = np.array([self.value_length, 3, 4])
   sp_tensor2 = sparse_tensor.SparseTensor(
       array_ops.constant(ind2, dtypes.int64),
       array_ops.constant(val2, dtypes.int64),
       array_ops.constant(shape2, dtypes.int64))
   sp_tensor3 = sparse_tensor.SparseTensor(
       array_ops.constant([[1, 9], [2, 2], [2, 10]], dtypes.int64),
       array_ops.constant([7, 15, 2], dtypes.int64),
       array_ops.constant([5, 12], dtypes.int64)
   )
   self.sp_tensor3_expected = sparse_tensor.SparseTensorValue(
       [[0, 1, 9], [0, 2, 2], [0, 2, 10], [1, 1, 9], [1, 2, 2], [1, 2, 10]],
       [7, 15, 2, 7, 15, 2],
       [2, 5, 12]
   )
   self.batch_size = 2
   self.key = string_ops.string_join([
       "key_", string_ops.as_string(
           math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
   ])
   self.sequences = {
       "seq1": np.random.rand(self.value_length, 5),
       "seq2": np.random.rand(self.value_length, 4, 2),
       "seq3": sp_tensor1,
       "seq4": sp_tensor2}
   self.context = {
       "context1": [3, 4],
       "sp_context": sp_tensor3}
   self.initial_states = {
       "state1": np.random.rand(6, 7),
       "state2": np.random.rand(8)
   }
Example No. 11
  def testBool(self):
    bool_inputs_ = [False, True]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (dtypes.bool,):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: bool_inputs_})
        self.assertAllEqual(s(result), ["false", "true"])
Example No. 12
  def testUnbatchDatasetWithStrings(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    self.assertDatasetProduces(
        data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
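The same map/batch/unbatch round trip with the core TF 2.x Dataset API (a sketch; Dataset.unbatch() plays the role of the contrib batching.unbatch() transformation):

import tensorflow as tf

ds = tf.data.Dataset.range(10).map(lambda x: (x, tf.strings.as_string(x)))
ds = ds.batch(2).unbatch()
# Elements come back as (0, b'0'), (1, b'1'), ...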
Example No. 13
  def testInt(self):
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])

      with self.assertRaisesOpError("scientific and shortest"):
        output = string_ops.as_string(input_, scientific=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("scientific and shortest"):
        output = string_ops.as_string(input_, shortest=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("precision not supported"):
        output = string_ops.as_string(input_, precision=0)
        output.eval(feed_dict={input_: int_inputs_})
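An eager sketch of the integer width/fill behavior (assuming TF 2.x; outputs follow the "%3d"/"%03d" patterns asserted above):

import tensorflow as tf

x = tf.constant([7, -1, 101], dtype=tf.int32)
print(tf.strings.as_string(x, width=3).numpy())            # [b'  7' b' -1' b'101']
print(tf.strings.as_string(x, width=3, fill="0").numpy())  # [b'007' b'-01' b'101']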
Example No. 14
 def input_fn():
   start = random_ops.random_uniform(
       (), minval=0, maxval=(np.pi * 2.0), dtype=dtypes.float32, seed=seed)
   sin_curves = math_ops.sin(
       math_ops.linspace(start, (sequence_length - 1) * increment,
                         sequence_length + 1))
   inputs = array_ops.slice(sin_curves, [0], [sequence_length])
   labels = array_ops.slice(sin_curves, [1], [sequence_length])
   input_key = string_ops.string_join([
       'key_',
       string_ops.as_string(math_ops.cast(10000 * start, dtypes.int32))
   ])
   return {'inputs': inputs, input_key_column_name: input_key}, labels
Example No. 15
def _classification_output(scores, n_classes, label_vocabulary=None):
  batch_size = array_ops.shape(scores)[0]
  if label_vocabulary:
    export_class_list = label_vocabulary
  else:
    export_class_list = string_ops.as_string(math_ops.range(n_classes))
  export_output_classes = array_ops.tile(
      input=array_ops.expand_dims(input=export_class_list, axis=0),
      multiples=[batch_size, 1])
  return export_output.ClassificationOutput(
      scores=scores,
      # `ClassificationOutput` requires string classes.
      classes=export_output_classes)
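A small sketch of the vocabulary-free branch (hypothetical sizes, assuming TF 2.x): export class names default to stringified indices, tiled to one row per example.

import tensorflow as tf

n_classes, batch_size = 3, 2
class_list = tf.strings.as_string(tf.range(n_classes))                  # [b'0', b'1', b'2']
classes = tf.tile(tf.expand_dims(class_list, axis=0), [batch_size, 1])  # shape [2, 3]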
Example No. 16
  def testVariableDevicePlacement(self):
    classes = np.random.randint(5, size=(20000,))  # Uniformly sampled
    target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
    with ops.device(
        device_setter.replica_device_setter(ps_tasks=1, ps_device="/cpu:0")):
      dataset = (dataset_ops.Dataset.from_tensor_slices(classes)
                 .shuffle(200, seed=21)
                 .map(lambda c: (c, string_ops.as_string(c))))
      dataset = dataset_ops.rejection_resample(
          dataset, target_dist=target_dist, initial_dist=None,
          class_func=lambda c, _: c, seed=27)

      self.assertEqual(1, len(variables.local_variables()))
      self.assertEqual(b"",
                       compat.as_bytes(variables.local_variables()[0].device))
Example No. 17
 def input_fn():
   random_sequence = random_ops.random_uniform(
       [sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
   labels = array_ops.slice(random_sequence, [0], [sequence_length])
   inputs = math_ops.to_float(
       array_ops.slice(random_sequence, [1], [sequence_length]))
   input_key = string_ops.string_join([
       'key_', string_ops.as_string(
           random_ops.random_uniform(
               (),
               minval=0,
               maxval=10000000,
               dtype=dtypes.int32,
               seed=seed))
   ])
   return {'inputs': inputs, input_key_column_name: input_key}, labels
Example No. 18
 def setUp(self):
   super(BatchSequencesWithStatesTest, self).setUp()
   self.value_length = 4
   self.batch_size = 2
   self.key = string_ops.string_join([
       "key_", string_ops.as_string(
           math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
   ])
   self.sequences = {
       "seq1": np.random.rand(self.value_length, 5),
       "seq2": np.random.rand(self.value_length, 4, 2)
   }
   self.context = {"context1": [3, 4]}
   self.initial_states = {
       "state1": np.random.rand(6, 7),
       "state2": np.random.rand(8)
   }
Example No. 19
  def testUnbatchDatasetWithStrings(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual((i, compat.as_bytes(str(i)), i), sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
Example No. 20
      def input_fn():
        input_key = string_ops.string_join([
            'key_', string_ops.as_string(
                random_ops.random_uniform(
                    (),
                    minval=0,
                    maxval=10000000,
                    dtype=dtypes.int32,
                    seed=seed))
        ])
        features = {}
        random_sequence = random_ops.random_uniform(
            [sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
        labels = array_ops.slice(random_sequence, [0], [sequence_length])
        inputs = math_ops.to_float(
            array_ops.slice(random_sequence, [1], [sequence_length]))
        features = {'inputs': inputs, input_key_column_name: input_key}

        if mode == model_fn_lib.ModeKeys.INFER:
          input_examples = array_ops.placeholder(dtypes.string)
          features[input_feature_key] = input_examples
          labels = None
        return features, labels
Example No. 21
  def testStateSaverWithTwoSimpleSteps(self):
    with self.cached_session() as sess:
      batch_size_value = 2
      batch_size = constant_op.constant(batch_size_value)
      num_unroll = 2
      length = 3
      key = string_ops.string_join([
          "key_", string_ops.as_string(
              math_ops.cast(10000 * random_ops.random_uniform(()),
                            dtypes.int32))
      ])
      padded_length = 4
      sequences = {
          "seq1": np.random.rand(padded_length, 5),
          "seq2": np.random.rand(padded_length, 4, 2)
      }
      context = {"context1": [3, 4]}
      initial_states = {
          "state1": np.random.rand(6, 7),
          "state2": np.random.rand(8)
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states,
          capacity=100)

      initial_key_value_0, _ = sess.run((key, state_saver.prefetch_op))
      initial_key_value_1, _ = sess.run((key, state_saver.prefetch_op))

      initial_key_value_0 = initial_key_value_0.decode("ascii")
      initial_key_value_1 = initial_key_value_1.decode("ascii")

      # Step 1
      next_batch = state_saver.next_batch
      (key_value, next_key_value, seq1_value, seq2_value, context1_value,
       state1_value, state2_value, length_value, _, _) = sess.run(
           (next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
            next_batch.sequences["seq2"], next_batch.context["context1"],
            next_batch.state("state1"), next_batch.state("state2"),
            next_batch.length,
            next_batch.save_state("state1", next_batch.state("state1") + 1),
            next_batch.save_state("state2", next_batch.state("state2") - 1)))

      expected_first_keys = set(
          ("00000_of_00002:%s" % x).encode("ascii")
          for x in (initial_key_value_0, initial_key_value_1))
      expected_second_keys = set(
          ("00001_of_00002:%s" % x).encode("ascii")
          for x in (initial_key_value_0, initial_key_value_1))
      expected_final_keys = set(
          ("STOP:%s" % x).encode("ascii")
          for x in (initial_key_value_0, initial_key_value_1))

      self.assertEqual(set(key_value), expected_first_keys)
      self.assertEqual(set(next_key_value), expected_second_keys)
      self.assertAllEqual(context1_value,
                          np.tile(context["context1"], (batch_size_value, 1)))
      self.assertAllEqual(seq1_value,
                          np.tile(sequences["seq1"][np.newaxis, 0:2, :],
                                  (batch_size_value, 1, 1)))
      self.assertAllEqual(seq2_value,
                          np.tile(sequences["seq2"][np.newaxis, 0:2, :, :],
                                  (batch_size_value, 1, 1, 1)))
      self.assertAllEqual(state1_value,
                          np.tile(initial_states["state1"],
                                  (batch_size_value, 1, 1)))
      self.assertAllEqual(state2_value,
                          np.tile(initial_states["state2"],
                                  (batch_size_value, 1)))
      self.assertAllEqual(length_value, [2, 2])

      # Step 2
      (key_value, next_key_value, seq1_value, seq2_value, context1_value,
       state1_value, state2_value, length_value, _, _) = sess.run(
           (next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
            next_batch.sequences["seq2"], next_batch.context["context1"],
            next_batch.state("state1"), next_batch.state("state2"),
            next_batch.length,
            next_batch.save_state("state1", next_batch.state("state1") + 1),
            next_batch.save_state("state2", next_batch.state("state2") - 1)))

      self.assertEqual(set(key_value), expected_second_keys)
      self.assertEqual(set(next_key_value), expected_final_keys)
      self.assertAllEqual(context1_value,
                          np.tile(context["context1"], (batch_size_value, 1)))
      self.assertAllEqual(seq1_value,
                          np.tile(sequences["seq1"][np.newaxis, 2:4, :],
                                  (batch_size_value, 1, 1)))
      self.assertAllEqual(seq2_value,
                          np.tile(sequences["seq2"][np.newaxis, 2:4, :, :],
                                  (batch_size_value, 1, 1, 1)))
      self.assertAllEqual(state1_value, 1 + np.tile(initial_states["state1"],
                                                    (batch_size_value, 1, 1)))
      self.assertAllEqual(state2_value, -1 + np.tile(initial_states["state2"],
                                                     (batch_size_value, 1)))
      self.assertAllEqual(length_value, [1, 1])

      # Finished.  Let's make sure there's nothing left in the barrier.
      self.assertEqual(0, state_saver.barrier.ready_size().eval())
Example No. 22
 def minimize(self, loss, global_step):
   del global_step
   return string_ops.string_join(
       [constant_op.constant(expected_train_result),
        string_ops.as_string(loss, precision=3)])
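The precision argument used here behaves like "%.3f"; a hedged eager sketch (assuming TF 2.x; the literal is illustrative):

import tensorflow as tf

loss = tf.constant(1.23456)
print(tf.strings.join(["expected: ", tf.strings.as_string(loss, precision=3)]).numpy())
# -> b'expected: 1.235'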
Example No. 23
def from_dataset_id(processing_mode,
                    service,
                    dataset_id,
                    element_spec=None,
                    job_name=None,
                    consumer_index=None,
                    num_consumers=None,
                    max_outstanding_requests=None,
                    data_transfer_protocol=None,
                    target_workers="AUTO"):
    """Creates a dataset which reads data from the tf.data service.

  This is useful when the dataset is registered by one process, then used in
  another process. When the same process is both registering and reading from
  the dataset, it is simpler to use `tf.data.experimental.service.distribute`
  instead.

  Before using `from_dataset_id`, the dataset must have been registered with the
  tf.data service using `tf.data.experimental.service.register_dataset`.
  `register_dataset` returns a dataset id for the registered dataset. That is
  the `dataset_id` which should be passed to `from_dataset_id`.

  The `element_spec` argument indicates the `tf.TypeSpec`s for the elements
  produced by the dataset. Currently `element_spec` must be explicitly
  specified, and match the dataset registered under `dataset_id`. `element_spec`
  defaults to `None` so that in the future we can support automatically
  discovering the `element_spec` by querying the tf.data service.

  `tf.data.experimental.service.distribute` is a convenience method which
  combines `register_dataset` and `from_dataset_id` into a dataset
  transformation.
  See the documentation for `tf.data.experimental.service.distribute` for more
  detail about how `from_dataset_id` works.

  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> worker = tf.data.experimental.service.WorkerServer(
  ...     tf.data.experimental.service.WorkerConfig(
  ...         dispatcher_address=dispatcher_address))
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset_id = tf.data.experimental.service.register_dataset(
  ...     dispatcher.target, dataset)
  >>> dataset = tf.data.experimental.service.from_dataset_id(
  ...     processing_mode="parallel_epochs",
  ...     service=dispatcher.target,
  ...     dataset_id=dataset_id,
  ...     element_spec=dataset.element_spec)
  >>> print(list(dataset.as_numpy_iterator()))
  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

  Args:
    processing_mode: A string specifying the policy for how data should be
      processed by tf.data workers. Can be either "parallel_epochs" to have each
      tf.data worker process a copy of the dataset, or "distributed_epoch" to
      split a single iteration of the dataset across all the workers.
    service: A string or a tuple indicating how to connect to the tf.data
      service. If it's a string, it should be in the format
      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
      address and `<protocol>` can optionally be used to override the default
      protocol to use. If it's a tuple, it should be (protocol, address).
    dataset_id: The id of the dataset to read from. This id is returned by
      `register_dataset` when the dataset is registered with the tf.data
      service.
    element_spec: A nested structure of `tf.TypeSpec`s representing the type of
      elements produced by the dataset. This argument is only required inside a
      tf.function. Use `tf.data.Dataset.element_spec` to get the element spec
      for a given dataset.
    job_name: (Optional.) The name of the job. If provided, it must be a
      non-empty string. This argument makes it possible
      for multiple datasets to share the same job. The default behavior is that
      the dataset creates anonymous, exclusively owned jobs.
    consumer_index: (Optional.) The index of the consumer in the range from `0`
      to `num_consumers`. Must be specified alongside `num_consumers`. When
      specified, consumers will read from the job in a strict round-robin order,
      instead of the default first-come-first-served order.
    num_consumers: (Optional.) The number of consumers which will consume from
      the job. Must be specified alongside `consumer_index`. When specified,
      consumers will read from the job in a strict round-robin order, instead of
      the default first-come-first-served order. When `num_consumers` is
      specified, the dataset must have infinite cardinality to prevent a
      producer from running out of data early and causing consumers to go out of
      sync.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. You can use this option to control the amount
      of memory used, since `distribute` won't use more than `element_size` *
      `max_outstanding_requests` of memory.
    data_transfer_protocol: (Optional.) The protocol to use for transferring
      data with the tf.data service. By default, data is transferred using gRPC.
    target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
      runtime decides which workers to read from. If `"ANY"`, reads from any
      tf.data service workers. If `"LOCAL"`, only reads from local in-process
      tf.data service workers. `"AUTO"` works well for most cases, while users
      can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
      data copy if every TF worker colocates with a tf.data service worker.
      Defaults to `"AUTO"`.

  Returns:
    A `tf.data.Dataset` which reads from the tf.data service.
  """
    _check_job_name(job_name)
    if job_name is not None:
        job_name = string_ops.string_join(
            ["dataset_id=",
             string_ops.as_string(dataset_id), job_name], "/")

    return _from_dataset_id(processing_mode=processing_mode,
                            service=service,
                            dataset_id=dataset_id,
                            element_spec=element_spec,
                            job_name=job_name,
                            consumer_index=consumer_index,
                            num_consumers=num_consumers,
                            max_outstanding_requests=max_outstanding_requests,
                            data_transfer_protocol=data_transfer_protocol,
                            target_workers=target_workers)
Example No. 24
 def map_fn(x):
   return (x, string_ops.as_string(x), string_ops.as_string(2 * x), 2 * x)
Example No. 25
    def testComplex(self):
        float_inputs_ = [
            0, 1, -1, 0.5, 0.25, 0.125,
            complex("INF"),
            complex("NAN"),
            complex("-INF")
        ]
        complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]

        with self.cached_session():
            for dtype in (dtypes.complex64, dtypes.complex128):
                input_ = array_ops.placeholder(dtype)

                def clean_nans(s_l):
                    return [
                        s.decode("ascii").replace("-nan", "nan") for s in s_l
                    ]

                output = string_ops.as_string(input_, shortest=True)
                result = output.eval(feed_dict={input_: complex_inputs_})
                self.assertAllEqual(
                    clean_nans(result),
                    ["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])

                output = string_ops.as_string(input_, scientific=True)
                result = output.eval(feed_dict={input_: complex_inputs_})
                self.assertAllEqual(
                    clean_nans(result),
                    ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])

                output = string_ops.as_string(input_)
                result = output.eval(feed_dict={input_: complex_inputs_})
                self.assertAllEqual(
                    clean_nans(result),
                    ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])

                output = string_ops.as_string(input_, width=3)
                result = output.eval(feed_dict={input_: complex_inputs_})
                self.assertAllEqual(clean_nans(result), [
                    "(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_
                ])

                output = string_ops.as_string(input_,
                                              width=3,
                                              fill="0",
                                              shortest=True)
                result = output.eval(feed_dict={input_: complex_inputs_})
                self.assertAllEqual(clean_nans(result), [
                    "(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_
                ])

                output = string_ops.as_string(input_, precision=10, width=3)
                result = output.eval(feed_dict={input_: complex_inputs_})
                self.assertAllEqual(clean_nans(result), [
                    "(%03.10f,%03.10f)" % (x.real, x.imag)
                    for x in complex_inputs_
                ])

                output = string_ops.as_string(input_,
                                              precision=10,
                                              width=3,
                                              fill="0",
                                              shortest=True)
                result = output.eval(feed_dict={input_: complex_inputs_})
                self.assertAllEqual(clean_nans(result), [
                    "(%03.10g,%03.10g)" % (x.real, x.imag)
                    for x in complex_inputs_
                ])

            with self.assertRaisesOpError("Cannot select both"):
                output = string_ops.as_string(input_,
                                              scientific=True,
                                              shortest=True)
                output.eval(feed_dict={input_: complex_inputs_})
Example No. 26
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None,
        default_name='multi_class_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):
      logits = _check_logits(logits, self.logits_dimension)

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        # class_ids's shape is [batch_size]
        class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
        class_ids = array_ops.expand_dims(class_ids, axis=(1,))
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')

        probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
            # Expand to [batch_size, 1]
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(probabilities)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string(
              math_ops.range(self._n_classes))
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '':
                    export_output.ClassificationOutput(
                        scores=probabilities,
                        # `ClassificationOutput` requires string classes.
                        classes=export_output_classes)
            })

      # Eval.
      label_ids = self._label_ids(_check_labels(labels, 1))

      unweighted_loss = losses.sparse_softmax_cross_entropy(
          labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
      # Restore the squeezed dim, so unweighted_loss matches the weights shape.
      unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=(1,))
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = math_ops.to_float(weights, name='weights')
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=label_ids,
                probabilities=probabilities,
                logits=logits,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
Example No. 27
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with variable_scope.variable_scope(
                None,
                default_name='binary_logistic_head',
                values=(tuple(six.itervalues(features)) + (labels, logits))):

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            logits = _check_logits(logits, self.logits_dimension)
            logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
            two_class_logits = array_ops.concat(
                (array_ops.zeros_like(logits), logits),
                1,
                name='two_class_logits')
            scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
            classes = array_ops.reshape(math_ops.argmax(two_class_logits,
                                                        axis=1), (-1, 1),
                                        name='classes')
            predictions = {
                pred_keys.LOGITS: logits,
                pred_keys.LOGISTIC: logistic,
                pred_keys.PROBABILITIES: scores,
                pred_keys.CLASS_IDS: classes
            }
            if mode == model_fn.ModeKeys.PREDICT:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        '':
                        export_output.ClassificationOutput(
                            scores=scores,
                            # `ClassificationOutput` requires string classes.
                            # TODO(ptucker): Support label_keys.
                            classes=string_ops.as_string(classes,
                                                         name='str_classes'))
                    })

            # Eval.
            labels = _check_labels(math_ops.to_float(labels),
                                   self.logits_dimension)
            unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
                labels=labels, logits=logits, name='loss')
            weights = (1. if (self._weight_feature_key is None) else
                       features[self._weight_feature_key])
            weights = math_ops.to_float(weights, name='weights')
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=labels,
                        logits=logits,
                        logistic=logistic,
                        scores=scores,
                        classes=classes,
                        unweighted_loss=unweighted_loss,
                        weights=weights))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
            logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS,
                                       training_loss)
            logging_ops.scalar_summary(
                metric_keys.MetricKeys.LOSS_MEAN,
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                          predictions=predictions,
                                          loss=training_loss,
                                          train_op=train_op_fn(training_loss))
Example No. 28
    def predictions(self, logits, keys=None):
        """Return predictions based on keys. See `base_head.Head` for details.

    Args:
      logits: logits `Tensor` with shape `[D0, D1, ... DN, logits_dimension]`.
        For many applications, the shape is `[batch_size, logits_dimension]`.
      keys: a list or tuple of prediction keys. Each key can be either the class
        variable of prediction_keys.PredictionKeys or its string value, such as:
        prediction_keys.PredictionKeys.CLASSES or 'classes'. If not specified,
        it will return the predictions for all valid keys.

    Returns:
      A dict of predictions.
    """
        pred_keys = prediction_keys.PredictionKeys
        valid_keys = [
            pred_keys.LOGITS, pred_keys.LOGISTIC, pred_keys.PROBABILITIES,
            pred_keys.CLASS_IDS, pred_keys.CLASSES, pred_keys.ALL_CLASS_IDS,
            pred_keys.ALL_CLASSES
        ]

        if keys:
            base_head.check_prediction_keys(keys, valid_keys)
        else:
            keys = valid_keys
        logits = base_head.check_logits_final_dim(logits,
                                                  self.logits_dimension)
        predictions = {}
        with ops.name_scope('predictions', values=(logits, )):
            if pred_keys.LOGITS in keys:
                predictions[pred_keys.LOGITS] = logits
            if pred_keys.LOGISTIC in keys:
                logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
                predictions[pred_keys.LOGISTIC] = logistic
            two_class_logits = array_ops.concat(
                (array_ops.zeros_like(logits), logits),
                axis=-1,
                name='two_class_logits')
            if pred_keys.PROBABILITIES in keys:
                probabilities = nn.softmax(two_class_logits,
                                           name=pred_keys.PROBABILITIES)
                predictions[pred_keys.PROBABILITIES] = probabilities
            if pred_keys.CLASS_IDS in keys or pred_keys.CLASSES in keys:
                class_ids = math_ops.argmax(two_class_logits,
                                            axis=-1,
                                            name=pred_keys.CLASS_IDS)
                class_ids = array_ops.expand_dims(class_ids, axis=-1)
                if pred_keys.CLASS_IDS in keys:
                    predictions[pred_keys.CLASS_IDS] = class_ids
                if pred_keys.CLASSES in keys:
                    if self._label_vocabulary is not None:
                        classes = self._class_string_table.lookup(class_ids)
                    else:
                        classes = string_ops.as_string(class_ids,
                                                       name='str_classes')
                    predictions[pred_keys.CLASSES] = classes
            if pred_keys.ALL_CLASS_IDS in keys:
                predictions[pred_keys.ALL_CLASS_IDS] = base_head.all_class_ids(
                    logits, n_classes=2)
            if pred_keys.ALL_CLASSES in keys:
                predictions[pred_keys.ALL_CLASSES] = base_head.all_classes(
                    logits,
                    n_classes=2,
                    label_vocabulary=self._label_vocabulary)
            return predictions
Example No. 29
 def _map_fn(v):
   return {"x": v,
           "y": array_ops.fill([v], v),
           "z": array_ops.fill([3], string_ops.as_string(v))}
Example No. 30
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        # Predict.
        with ops.name_scope(self._name, 'head'):
            with ops.name_scope(None, 'predictions', (logits, )):
                pred_keys = prediction_keys.PredictionKeys
                logits = _check_logits(logits, self.logits_dimension)
                logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
                two_class_logits = array_ops.concat(
                    (array_ops.zeros_like(logits), logits),
                    1,
                    name='two_class_logits')
                probabilities = nn.softmax(two_class_logits,
                                           name=pred_keys.PROBABILITIES)
                class_ids = array_ops.reshape(math_ops.argmax(two_class_logits,
                                                              axis=1), (-1, 1),
                                              name='classes')
                if self._label_vocabulary:
                    table = lookup_ops.index_to_string_table_from_tensor(
                        vocabulary_list=self._label_vocabulary,
                        name='class_string_lookup')
                    classes = table.lookup(class_ids)
                else:
                    classes = string_ops.as_string(class_ids,
                                                   name='str_classes')
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.LOGISTIC: logistic,
                    pred_keys.PROBABILITIES: probabilities,
                    pred_keys.CLASS_IDS: class_ids,
                    pred_keys.CLASSES: classes,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                classifier_output = _classification_output(
                    scores=probabilities,
                    n_classes=2,
                    label_vocabulary=self._label_vocabulary)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY:
                        classifier_output,
                        _CLASSIFY_SERVING_KEY:
                        classifier_output,
                        _REGRESS_SERVING_KEY:
                        export_output.RegressionOutput(value=logistic),
                        _PREDICT_SERVING_KEY:
                        export_output.PredictOutput(predictions)
                    })

            # Eval.
            unweighted_loss, processed_labels = self.create_loss(
                features=features, mode=mode, logits=logits, labels=labels)
            weights = _weights(features, self._weight_column)
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=processed_labels,
                        logits=logits,
                        logistic=logistic,
                        class_ids=class_ids,
                        unweighted_loss=unweighted_loss,
                        weights=weights))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
        with ops.name_scope(''):
            summary.scalar(
                _summary_key(self._name, metric_keys.MetricKeys.LOSS),
                training_loss)
            summary.scalar(
                _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                      predictions=predictions,
                                      loss=training_loss,
                                      train_op=train_op_fn(training_loss))
Example No. 31
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with ops.name_scope('head'):
      logits = _check_logits(logits, self.logits_dimension)

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        # class_ids's shape is [batch_size]
        class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
        class_ids = array_ops.expand_dims(class_ids, axis=(1,))
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')

        probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
            # Expand to [batch_size, 1]
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(probabilities)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string(
              math_ops.range(self._n_classes))
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '':
                    export_output.ClassificationOutput(
                        scores=probabilities,
                        # `ClassificationOutput` requires string classes.
                        classes=export_output_classes)
            })

      # Eval.
      unweighted_loss, label_ids = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=label_ids,
                probabilities=probabilities,
                logits=logits,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          _summary_key(self._head_name, metric_keys.MetricKeys.LOSS),
          training_loss)
      summary.scalar(
          _summary_key(self._head_name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Example No. 32
 def fill_tuple(x):
   filled = array_ops.fill([x], x)
   return (filled, string_ops.as_string(filled))
Example No. 33
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    # Predict.
    with ops.name_scope('head'):
      with ops.name_scope(None, 'predictions', (logits,)):
        pred_keys = prediction_keys.PredictionKeys
        logits = _check_logits(logits, self.logits_dimension)
        logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
        two_class_logits = array_ops.concat(
            (array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
        scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
        class_ids = array_ops.reshape(
            math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.LOGISTIC: logistic,
            pred_keys.PROBABILITIES: scores,
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(logistic)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string([0, 1])
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        classifier_output = export_output.ClassificationOutput(
            scores=scores,
            # `ClassificationOutput` requires string classes.
            classes=export_output_classes)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '': classifier_output,  # kept the same as other heads.
                'classification': classifier_output,  # to be called by name.
                _DEFAULT_SERVING_KEY: classifier_output,  # default
                'regression': export_output.RegressionOutput(value=logistic)
            })

      # Eval.
      unweighted_loss, processed_labels = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=processed_labels,
                logits=logits,
                logistic=logistic,
                scores=scores,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          _summary_key(self._head_name, metric_keys.MetricKeys.LOSS),
          training_loss)
      summary.scalar(
          _summary_key(self._head_name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Exemplo n.º 34
0
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with variable_scope.variable_scope(
                None,
                default_name='multi_class_head',
                values=(tuple(six.itervalues(features)) + (labels, logits))):
            logits = _check_logits(logits, self.logits_dimension)

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                # class_ids's shape is [batch_size]
                class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASSES)
                probabilities = nn.softmax(logits,
                                           name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS:
                    logits,
                    pred_keys.PROBABILITIES:
                    probabilities,
                    # Expand to [batch_size, 1]
                    pred_keys.CLASSES:
                    array_ops.expand_dims(class_ids, axis=(1, ))
                }
            if mode == model_fn.ModeKeys.PREDICT:
                batch_size = array_ops.shape(probabilities)[0]
                output_classes = array_ops.tile(input=array_ops.expand_dims(
                    input=math_ops.range(self._n_classes), axis=0),
                                                multiples=[batch_size, 1])
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        '':
                        export_output.ClassificationOutput(
                            scores=probabilities,
                            # `ClassificationOutput` requires string classes.
                            # TODO(xiejw): Support label_keys or label_column
                            classes=string_ops.as_string(output_classes,
                                                         name='str_classes'))
                    })

            # Eval.
            labels = _check_labels(labels, 1)
            # Check that we got integer for classification.
            if not labels.dtype.is_integer:
                raise ValueError('Labels dtype should be integer. '
                                 'Instead got %s.' % labels.dtype)
            assert_less = check_ops.assert_less(
                labels,
                ops.convert_to_tensor(self._n_classes, dtype=labels.dtype),
                message='Label IDs must be < n_classes')
            assert_greater = check_ops.assert_non_negative(
                labels, message='Label IDs must be >= 0')
            with ops.control_dependencies((assert_less, assert_greater)):
                labels = array_ops.identity(labels)

            unweighted_loss = losses.sparse_softmax_cross_entropy(
                labels=labels, logits=logits, reduction=losses.Reduction.NONE)
            # Restore the squeezed dim, so unweighted_loss matches the weights shape.
            unweighted_loss = array_ops.expand_dims(unweighted_loss,
                                                    axis=(1, ))
            weights = (1. if (self._weight_column_name is None) else
                       features[self._weight_column_name])
            weights = math_ops.to_float(weights, name='weights')
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=labels,
                        probabilities=probabilities,
                        logits=logits,
                        class_ids=class_ids,
                        unweighted_loss=unweighted_loss,
                        weights=weights))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
            logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS,
                                       training_loss)
            logging_ops.scalar_summary(
                metric_keys.MetricKeys.LOSS_MEAN,
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                          predictions=predictions,
                                          loss=training_loss,
                                          train_op=train_op_fn(training_loss))
Exemplo n.º 35
0
 def reduce_func(key, dataset):
     shard_filename = string_ops.string_join(
         [filename, string_ops.as_string(key)])
     writer = writers.TFRecordWriter(shard_filename)
     writer.write(dataset.map(lambda _, x: x))
     return dataset_ops.Dataset.from_tensors(shard_filename)
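The reduce_func above is a per-key reducer for sharded TFRecord writing: each keyed window is written to its own file and the shard filename is returned as a one-element dataset. A hedged sketch of how such a reducer is typically wired up with the public tf.data API; the file prefix, shard count, and placeholder records are assumptions, not part of the snippet:

import tensorflow as tf

filename = "/tmp/data.tfrecord"  # hypothetical output prefix
NUM_SHARDS = 4                   # hypothetical shard count

def key_func(i, _):
  # Route each (index, serialized_example) pair to a shard by its index.
  return i % NUM_SHARDS

def reduce_func(key, dataset):
  shard_filename = tf.strings.join([filename, tf.strings.as_string(key)])
  writer = tf.data.experimental.TFRecordWriter(shard_filename)
  writer.write(dataset.map(lambda _, x: x))  # drop the index, keep the record
  return tf.data.Dataset.from_tensors(shard_filename)

records = tf.data.Dataset.from_tensor_slices([b"a", b"b", b"c", b"d"])
shards = records.enumerate().apply(
    tf.data.experimental.group_by_window(key_func, reduce_func, tf.int64.max))
for shard in shards:  # consuming the dataset performs the writes
  print(shard.numpy())
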
Exemplo n.º 36
0
    def testFloat(self):
        float_inputs_ = [
            0, 1, -1, 0.5, 0.25, 0.125,
            float("INF"),
            float("NAN"),
            float("-INF")
        ]

        with self.cached_session():
            for dtype in (dtypes.half, dtypes.bfloat16, dtypes.float32,
                          dtypes.float64):
                input_ = array_ops.placeholder(dtype)

                output = string_ops.as_string(input_, shortest=True)
                result = output.eval(feed_dict={input_: float_inputs_})
                s = lambda strs: [x.decode("ascii") for x in strs]
                self.assertAllEqual(s(result),
                                    ["%g" % x for x in float_inputs_])

                output = string_ops.as_string(input_, scientific=True)
                result = output.eval(feed_dict={input_: float_inputs_})
                self.assertAllEqual(s(result),
                                    ["%e" % x for x in float_inputs_])

                output = string_ops.as_string(input_)
                result = output.eval(feed_dict={input_: float_inputs_})
                self.assertAllEqual(s(result),
                                    ["%f" % x for x in float_inputs_])

                output = string_ops.as_string(input_, width=3)
                result = output.eval(feed_dict={input_: float_inputs_})
                self.assertAllEqual(s(result),
                                    ["%3f" % x for x in float_inputs_])

                output = string_ops.as_string(input_, width=3, fill="0")
                result = output.eval(feed_dict={input_: float_inputs_})
                self.assertAllEqual(s(result),
                                    ["%03f" % x for x in float_inputs_])

                output = string_ops.as_string(input_,
                                              width=3,
                                              fill="0",
                                              shortest=True)
                result = output.eval(feed_dict={input_: float_inputs_})
                self.assertAllEqual(s(result),
                                    ["%03g" % x for x in float_inputs_])

                output = string_ops.as_string(input_, precision=10, width=3)
                result = output.eval(feed_dict={input_: float_inputs_})
                self.assertAllEqual(s(result),
                                    ["%03.10f" % x for x in float_inputs_])

                output = string_ops.as_string(input_,
                                              precision=10,
                                              width=3,
                                              fill="0",
                                              shortest=True)
                result = output.eval(feed_dict={input_: float_inputs_})
                self.assertAllEqual(s(result),
                                    ["%03.10g" % x for x in float_inputs_])

            with self.assertRaisesOpError("Cannot select both"):
                output = string_ops.as_string(input_,
                                              scientific=True,
                                              shortest=True)
                output.eval(feed_dict={input_: float_inputs_})

            with self.assertRaisesOpError("Fill string must be one or fewer"):
                output = string_ops.as_string(input_, fill="ab")
                output.eval(feed_dict={input_: float_inputs_})
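The test above checks the formatting knobs of as_string against printf-style references. A minimal eager sketch of the same options through the public tf.strings.as_string alias; the input values are illustrative:

import tensorflow as tf

x = tf.constant([0.5, 1234.5678])
print(tf.strings.as_string(x).numpy())                      # like "%f": b'0.500000', b'1234.567749' (float32 rounding)
print(tf.strings.as_string(x, shortest=True).numpy())       # like "%g": b'0.5', b'1234.57'
print(tf.strings.as_string(x, scientific=True).numpy())     # like "%e"
print(tf.strings.as_string(x, precision=2).numpy())         # like "%.2f": b'0.50', b'1234.57'
print(tf.strings.as_string(x, width=10, fill="0").numpy())  # like "%010f"
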
Exemplo n.º 37
0
 def fill_tuple(x):
     filled = array_ops.fill([x], x)
     return (filled, string_ops.as_string(filled), {
         'structure': string_ops.as_string(filled)
     })
Exemplo n.º 38
0
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with variable_scope.variable_scope(
                None,
                default_name='multi_class_head',
                values=(tuple(six.itervalues(features)) + (labels, logits))):
            logits = _check_logits(logits, self.logits_dimension)

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                # class_ids's shape is [batch_size]
                class_ids = math_ops.argmax(logits,
                                            1,
                                            name=pred_keys.CLASS_IDS)
                class_ids = array_ops.expand_dims(class_ids, axis=(1, ))
                if self._label_vocabulary:
                    table = lookup_ops.index_to_string_table_from_tensor(
                        vocabulary_list=self._label_vocabulary,
                        name='class_string_lookup')
                    classes = table.lookup(class_ids)
                else:
                    classes = string_ops.as_string(class_ids,
                                                   name='str_classes')

                probabilities = nn.softmax(logits,
                                           name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                    # Expand to [batch_size, 1]
                    pred_keys.CLASS_IDS: class_ids,
                    pred_keys.CLASSES: classes,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                batch_size = array_ops.shape(probabilities)[0]
                export_class_list = self._label_vocabulary
                if not export_class_list:
                    export_class_list = string_ops.as_string(
                        math_ops.range(self._n_classes))
                export_output_classes = array_ops.tile(
                    input=array_ops.expand_dims(input=export_class_list,
                                                axis=0),
                    multiples=[batch_size, 1])
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        '':
                        export_output.ClassificationOutput(
                            scores=probabilities,
                            # `ClassificationOutput` requires string classes.
                            classes=export_output_classes)
                    })

            # Eval.
            label_ids = self._label_ids(
                _check_labels(_maybe_expand_dim(labels), 1))

            unweighted_loss = losses.sparse_softmax_cross_entropy(
                labels=label_ids,
                logits=logits,
                reduction=losses.Reduction.NONE)
            # Restore the squeezed dim, so unweighted_loss matches the weights shape.
            unweighted_loss = array_ops.expand_dims(unweighted_loss,
                                                    axis=(1, ))
            weights = (1. if (self._weight_feature_key is None) else
                       features[self._weight_feature_key])
            weights = _maybe_expand_dim(
                math_ops.to_float(weights, name='weights'))
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=label_ids,
                        probabilities=probabilities,
                        logits=logits,
                        class_ids=class_ids,
                        unweighted_loss=unweighted_loss,
                        weights=weights))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
            logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS,
                                       training_loss)
            logging_ops.scalar_summary(
                metric_keys.MetricKeys.LOSS_MEAN,
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                          predictions=predictions,
                                          loss=training_loss,
                                          train_op=train_op_fn(training_loss))
Exemplo n.º 39
0
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with ops.name_scope('head'):
      logits = _check_logits(logits, self.logits_dimension)

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        # class_ids's shape is [batch_size]
        class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
        class_ids = array_ops.expand_dims(class_ids, axis=(1,))
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')

        probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
            # Expand to [batch_size, 1]
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(probabilities)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string(
              math_ops.range(self._n_classes))
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        classifier_output = export_output.ClassificationOutput(
            scores=probabilities,
            # `ClassificationOutput` requires string classes.
            classes=export_output_classes)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                _CLASSIFY_SERVING_KEY: classifier_output,
                _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
            })

      # Eval.
      unweighted_loss, label_ids = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=label_ids,
                probabilities=probabilities,
                logits=logits,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS),
          training_loss)
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Exemplo n.º 40
0
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None, default_name='binary_logistic_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      logits = _check_logits(logits, self.logits_dimension)
      logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
      two_class_logits = array_ops.concat(
          (array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
      scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
      classes = array_ops.reshape(
          math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
      predictions = {
          pred_keys.LOGITS: logits,
          pred_keys.LOGISTIC: logistic,
          pred_keys.PROBABILITIES: scores,
          pred_keys.CLASS_IDS: classes
      }
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'': export_output.ClassificationOutput(
                scores=scores,
                # `ClassificationOutput` requires string classes.
                # TODO(ptucker): Support label_keys.
                classes=string_ops.as_string(classes, name='str_classes'))})

      # Eval.
      labels = _check_labels(math_ops.to_float(labels), self.logits_dimension)
      unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
          labels=labels, logits=logits, name='loss')
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = math_ops.to_float(weights, name='weights')
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=labels,
                logits=logits,
                logistic=logistic,
                scores=scores,
                classes=classes,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
Exemplo n.º 41
0
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    # Predict.
    with ops.name_scope('head'):
      with ops.name_scope(None, 'predictions', (logits,)):
        pred_keys = prediction_keys.PredictionKeys
        logits = _check_logits(logits, self.logits_dimension)
        logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
        two_class_logits = array_ops.concat(
            (array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
        scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
        class_ids = array_ops.reshape(
            math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.LOGISTIC: logistic,
            pred_keys.PROBABILITIES: scores,
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(logistic)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string([0, 1])
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        classifier_output = export_output.ClassificationOutput(
            scores=scores,
            # `ClassificationOutput` requires string classes.
            classes=export_output_classes)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '': classifier_output,  # kept the same as other heads.
                'classification': classifier_output,  # to be called by name.
                _DEFAULT_SERVING_KEY: classifier_output,  # default
                'regression': export_output.RegressionOutput(value=logistic)
            })

      # Eval.
      labels = _check_labels(_maybe_expand_dim(labels), self.logits_dimension)
      if self._label_vocabulary is not None:
        labels = lookup_ops.index_table_from_tensor(
            vocabulary_list=tuple(self._label_vocabulary),
            name='class_id_lookup').lookup(labels)
      labels = math_ops.to_float(labels)
      labels = _assert_range(labels, 2)
      unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
          labels=labels, logits=logits, name='loss')
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=labels,
                logits=logits,
                logistic=logistic,
                scores=scores,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
      summary.scalar(metric_keys.MetricKeys.LOSS_MEAN,
                     losses.compute_weighted_loss(
                         unweighted_loss,
                         weights=weights,
                         reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Exemplo n.º 42
0
 def _train_op_fn(loss):
     return string_ops.string_join([
         constant_op.constant(expected_train_result),
         string_ops.as_string(loss, precision=3)
     ])
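This test helper builds a "train op" that is just a string: a marker joined with the loss formatted to three decimals, so the test can assert on the final loss value. A hedged standalone sketch of what the joined string looks like; the marker and loss value are illustrative:

import tensorflow as tf

expected_train_result = "my_train_op"  # hypothetical marker used by a test
loss = tf.constant(1.0)
train_op = tf.strings.join([tf.constant(expected_train_result),
                            tf.strings.as_string(loss, precision=3)])
print(train_op.numpy())  # b'my_train_op1.000'
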
Exemplo n.º 43
0
 def minimize(self, loss, global_step):
     del global_step
     return string_ops.string_join([
         constant_op.constant(expected_train_result),
         string_ops.as_string(loss, precision=3)
     ])
Exemplo n.º 44
0
 def _train_op_fn(loss):
   return string_ops.string_join(
       [constant_op.constant(expected_train_result),
        string_ops.as_string(loss, precision=3)])
Exemplo n.º 45
0
 def fill_tuple(x):
     filled = array_ops.fill([x], x)
     return (filled, string_ops.as_string(filled))
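fill_tuple yields a variable-length pair (an int vector of length x and its stringified copy), the usual shape for exercising padded batching in the tf.data tests. A hedged usage sketch with the public API; the range and batch size are assumptions:

import tensorflow as tf

def fill_tuple(x):
  filled = tf.fill([x], x)
  return (filled, tf.strings.as_string(filled))

ds = tf.data.Dataset.range(1, 4).map(fill_tuple)
# Pad both components to the longest element in each batch; strings pad with b''.
ds = ds.padded_batch(2, padded_shapes=([None], [None]))
for ints, strs in ds:
  print(ints.numpy(), strs.numpy())
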
Exemplo n.º 46
0
def _as_string(tensor):
  if dtypes.string == tensor.dtype.base_dtype:
    return tensor
  return string_ops.as_string(tensor)
Exemplo n.º 47
0
def _as_string(tensor):
  if dtypes.string == tensor.dtype.base_dtype:
    return tensor
  return string_ops.as_string(tensor)
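The _as_string helper is a cheap dtype guard: tensors that are already strings pass through untouched, everything else goes through as_string. A hedged sketch of where such a guard is handy, e.g., joining heterogeneous feature columns into one string key; the feature names and values are illustrative, and the public aliases are assumed equivalent to the internal modules above:

import tensorflow as tf

def _as_string(tensor):
  if tf.string == tensor.dtype.base_dtype:
    return tensor
  return tf.strings.as_string(tensor)

user_id = tf.constant([17, 42])      # int feature
country = tf.constant(["br", "de"])  # string feature
key = tf.strings.join(
    [_as_string(user_id), _as_string(country)], separator="_")
print(key.numpy())  # [b'17_br' b'42_de']
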
Exemplo n.º 48
0
 def _map_fn(v):
   return (v, array_ops.fill([v], v),
           array_ops.fill([3], string_ops.as_string(v)))
Exemplo n.º 49
0
 def _map_fn(v):
     return (v, array_ops.fill([v], v),
             array_ops.fill([3], string_ops.as_string(v)))
Exemplo n.º 50
0
 def _map_fn(v):
     return {
         "x": v,
         "y": array_ops.fill([v], v),
         "z": array_ops.fill([3], string_ops.as_string(v))
     }
 def host_call(predictions):
     classes = string_ops.as_string(predictions, name='classes')
     classification_output = export_output_lib.ClassificationOutput(
         classes=classes)
     export_outputs['classification'] = classification_output