Code example #1
    def testEmpty(self):
        x = np.random.randint(2, size=0)
        y, idx, count = array_ops.unique_with_counts(x)
        tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])

        self.assertEqual(tf_idx.shape, (0, ))
        self.assertEqual(tf_y.shape, (0, ))
        self.assertEqual(tf_count.shape, (0, ))
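For orientation, here is a minimal standalone sketch of the op's three outputs, using the public tf.unique_with_counts API in eager TF 2.x (the input values follow the example from the TensorFlow documentation):

import tensorflow as tf

x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
y, idx, count = tf.unique_with_counts(x)
print(y.numpy())      # [1 2 4 7 8]          unique values, in order of first appearance
print(idx.numpy())    # [0 0 1 2 2 2 3 4 4]  index of each element of x in y
print(count.numpy())  # [2 1 3 1 2]          occurrences of each unique value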
Code example #2
  def testBool(self):
    x = np.random.choice([True, False], size=7000)
    y, idx, count = array_ops.unique_with_counts(x)
    tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])

    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]])
    for value, count in zip(tf_y, tf_count):
      self.assertEqual(count, np.sum(x == value))
Code example #3
  def testInt32OutIdxInt64(self):
    x = np.random.randint(2, high=10, size=7000)
    y, idx, count = array_ops.unique_with_counts(x, out_idx=dtypes.int64)
    tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])

    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]])
    for value, count in zip(tf_y, tf_count):
      self.assertEqual(count, np.sum(x == value))
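The out_idx argument only changes the integer dtype of the returned indices and counts, not their values; a quick check with the public API (input values are illustrative):

import tensorflow as tf

y, idx, count = tf.unique_with_counts(tf.constant([2, 7, 2]), out_idx=tf.int64)
print(idx.dtype, count.dtype)                 # int64 for both (the default is int32)
print(y.numpy(), idx.numpy(), count.numpy())  # [2 7] [0 1 0] [2 1]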
Code example #4
  def testInt32(self):
    x = np.random.randint(2, high=10, size=7000)
    with self.test_session() as sess:
      y, idx, count = array_ops.unique_with_counts(x)
      tf_y, tf_idx, tf_count = sess.run([y, idx, count])

    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]])
    for value, count in zip(tf_y, tf_count):
      self.assertEqual(count, np.sum(x == value))
Code example #5
  def testOrderedByAppearance(self):
    x = np.array(
        [3, 5, 3, 4, 1, 4, 9, 8, 6, 3, 5, 7, 8, 8, 4, 6, 4, 2, 5, 6])
    true_y = np.array([3, 5, 4, 1, 9, 8, 6, 7, 2])
    true_idx = np.array(
        [0, 1, 0, 2, 3, 2, 4, 5, 6, 0, 1, 7, 5, 5, 2, 6, 2, 8, 1, 6])
    true_count = np.array([3, 3, 4, 1, 1, 3, 3, 1, 1])
    y, idx, count = array_ops.unique_with_counts(x)
    tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
    self.assertAllEqual(tf_y, true_y)
    self.assertAllEqual(tf_idx, true_idx)
    self.assertAllEqual(tf_count, true_count)
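As an aside (not part of the original test), NumPy's np.unique returns its values sorted, so reproducing the appearance-ordered output above takes an extra argsort over the first-occurrence indices:

import numpy as np

x = np.array([3, 5, 3, 4, 1, 4, 9, 8, 6, 3, 5, 7, 8, 8, 4, 6, 4, 2, 5, 6])
vals, first_idx, counts = np.unique(x, return_index=True, return_counts=True)
order = np.argsort(first_idx)
print(vals[order])    # [3 5 4 1 9 8 6 7 2]  matches true_y above
print(counts[order])  # [3 3 4 1 1 3 3 1 1]  matches true_count above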
Code example #6
File: unique_op_test.py  Project: Wajih-O/tensorflow
  def testInt32OutIdxInt64(self):
    x = np.random.randint(2, high=10, size=7000)
    with self.cached_session() as sess:
      y, idx, count = array_ops.unique_with_counts(x, out_idx=dtypes.int64)
      tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])

    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]])
    for value, count in zip(tf_y, tf_count):
      self.assertEqual(count, np.sum(x == value))
Code example #7
  def testString(self):
    indx = np.random.randint(65, high=122, size=7000)
    x = [chr(i) for i in indx]

    y, idx, count = array_ops.unique_with_counts(x)
    tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])

    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
    for value, count in zip(tf_y, tf_count):
      v = [1 if x[i] == value.decode('ascii') else 0 for i in range(7000)]
      self.assertEqual(count, sum(v))
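A small illustrative sketch (eager TF 2.x, not from the original test) of why the assertions above call .decode('ascii'): string outputs come back as bytes objects:

import tensorflow as tf

y, idx, count = tf.unique_with_counts(tf.constant(["a", "b", "a"]))
print(y.numpy())      # [b'a' b'b']  bytes values, hence the .decode('ascii') above
print(idx.numpy())    # [0 1 0]
print(count.numpy())  # [2 1]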
Code example #8
# Imports needed to run this snippet standalone (TensorFlow internal op modules).
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope


def _center_loss(logit, labels, alpha, lam, num_classes, dtype=dtypes.float32):
    """
    Compute the center loss and update the centers, following
    'A Discriminative Feature Learning Approach for Deep Face Recognition', ECCV 2016.

    :param logit: output of the last fully connected layer, [batch_size, feature_dimension] tensor
    :param labels: true label of every sample, [batch_size] tensor (not one-hot)
    :param alpha: learning rate controlling how fast the centers are updated, float in (0, 1)
    :param lam: weight of the center loss relative to the softmax loss and others
    :param num_classes: number of classes, int
    :param dtype: dtype of the centers variable
    :return:
        loss: the computed center loss
        centers: tensor of all centers, [num_classes, feature_dimension]
        center_update_op: op that must be run while training the model to update the centers
    """

    # get feature dimension
    fea_dimension = array_ops.shape(logit)[1]

    # initialize centers
    centers = variable_scope.get_variable(
        'centers', [num_classes, fea_dimension],
        dtype=dtype,
        initializer=init_ops.constant_initializer(0),
        trainable=False)

    labels = array_ops.reshape(labels, [-1])

    # get centers about current batch
    centers_batch = array_ops.gather(centers, labels)

    # compute the l2 center loss, scaled by lam
    loss = nn_ops.l2_loss(logit - centers_batch) * lam

    # compute the difference between each sample and their corresponding center
    diff = centers_batch - logit

    # compute delta of corresponding center
    unique_label, unique_idx, unique_count = array_ops.unique_with_counts(
        labels)
    appear_times = array_ops.gather(unique_count, unique_idx)
    appear_times = array_ops.reshape(appear_times, [-1, 1])
    delta_centers = diff / math_ops.cast(1 + appear_times, dtype)
    delta_centers = delta_centers * alpha

    # update centers
    center_update_op = state_ops.scatter_sub(centers, labels, delta_centers)

    return loss, centers, center_update_op
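A minimal usage sketch of the function above; the shapes, hyperparameters, variable names, and the softmax head are illustrative assumptions (graph-mode TF 1.x style), not taken from the original project:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

features = tf.placeholder(tf.float32, [None, 128])  # embeddings fed to the center loss
labels = tf.placeholder(tf.int64, [None])           # sparse (non-one-hot) class labels

center_loss, centers, center_update_op = _center_loss(
    features, labels, alpha=0.5, lam=0.003, num_classes=10)

# a plain softmax head on top of the same features
softmax_w = tf.get_variable("softmax_w", [128, 10])
softmax_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=tf.matmul(features, softmax_w)))
total_loss = softmax_loss + center_loss  # lam is already applied inside _center_loss

# The centers live in a non-trainable variable, so their update op has to run
# together with every training step.
with tf.control_dependencies([center_update_op]):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)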
Code example #9
  def testString(self):
    indx = np.random.randint(65, high=122, size=7000)
    x = [chr(i) for i in indx]

    with self.test_session() as sess:
      y, idx, count = array_ops.unique_with_counts(x)
      tf_y, tf_idx, tf_count = sess.run([y, idx, count])

    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
    for value, count in zip(tf_y, tf_count):
      v = [1 if x[i] == value.decode('ascii') else 0 for i in range(7000)]
      self.assertEqual(count, sum(v))
Code example #10
    def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
        with session.Session() as sess:
            x_name = "oneOfTwoSlots/x"
            u_name = "oneOfTwoSlots/u"
            v_name = "oneOfTwoSlots/v"
            w_name = "oneOfTwoSlots/w"
            y_name = "oneOfTwoSlots/y"

            x = variables.Variable([1, 3, 3, 7],
                                   dtype=dtypes.int32,
                                   name=x_name)
            sess.run(x.initializer)

            unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

            v = math_ops.add(unique_x, unique_x, name=v_name)
            w = math_ops.add(indices, indices, name=w_name)
            y = math_ops.add(w, w, name=y_name)

            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            # Watch only the first output slot of u, even though it has two output
            # slots.
            debug_utils.add_debug_tensor_watch(run_options,
                                               u_name,
                                               0,
                                               debug_urls=self._debug_urls())
            debug_utils.add_debug_tensor_watch(run_options,
                                               w_name,
                                               0,
                                               debug_urls=self._debug_urls())
            debug_utils.add_debug_tensor_watch(run_options,
                                               y_name,
                                               0,
                                               debug_urls=self._debug_urls())

            run_metadata = config_pb2.RunMetadata()
            sess.run([v, y], options=run_options, run_metadata=run_metadata)

            dump = debug_data.DebugDumpDir(
                self._dump_root,
                partition_graphs=run_metadata.partition_graphs,
                validate=True)

            self.assertAllClose([1, 3, 7],
                                dump.get_tensors(u_name, 0,
                                                 "DebugIdentity")[0])
Code example #11
  def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
    with session.Session() as sess:
      x_name = "oneOfTwoSlots/x"
      u_name = "oneOfTwoSlots/u"
      v_name = "oneOfTwoSlots/v"
      w_name = "oneOfTwoSlots/w"
      y_name = "oneOfTwoSlots/y"

      x = variables.Variable([1, 3, 3, 7], dtype=tf.int32, name=x_name)
      sess.run(x.initializer)

      unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

      v = math_ops.add(unique_x, unique_x, name=v_name)
      w = math_ops.add(indices, indices, name=w_name)
      y = math_ops.add(w, w, name=y_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Watch only the first output slot of u, even though it has two output
      # slots.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, w_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, y_name, 0, debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run([v, y], options=run_options, run_metadata=run_metadata)

      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=True)

      self.assertAllClose([1, 3, 7],
                          dump.get_tensors(u_name, 0, "DebugIdentity")[0])