def _test_bucket_by_padding(no_padding):
  # Relies on fixtures from the enclosing test: element_gen,
  # _element_length_fn, boundaries, batch_sizes, lengths, and self.
  dataset = dataset_ops.Dataset.from_generator(
      element_gen, (dtypes.int64,), ([None],))
  if no_padding:
    # Convert each element to a SparseTensor so batching skips padding.
    dataset = dataset.map(lambda x: (layers.dense_to_sparse(x),))
  dataset = dataset.apply(
      grouping.bucket_by_sequence_length(
          _element_length_fn,
          boundaries,
          batch_sizes,
          no_padding=no_padding))
  batch, = dataset.make_one_shot_iterator().get_next()

  with self.cached_session() as sess:
    batches = []
    for _ in range(4):
      batches.append(sess.run(batch))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(batch)
  batch_sizes_val = []
  lengths_val = []
  for batch in batches:
    shape = batch.dense_shape if no_padding else batch.shape
    batch_size = shape[0]
    length = shape[1]
    batch_sizes_val.append(batch_size)
    lengths_val.append(length)
    # The generator is constructed so each batch's element sum works out
    # to batch_size * length - 1.
    sum_check = batch.values.sum() if no_padding else batch.sum()
    self.assertEqual(sum_check, batch_size * length - 1)
  self.assertEqual(sum(batch_sizes_val), sum(batch_sizes))
  self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val))
  self.assertEqual(sorted(lengths), sorted(lengths_val))
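For orientation, here is a minimal self-contained sketch of the same bucketing pattern outside the test harness; the generator, boundary, and batch sizes below are illustrative stand-ins, not the test's fixtures:

import numpy as np
import tensorflow as tf

def gen():
  # Four variable-length sequences; two fall below the boundary, two above.
  for length in [3, 5, 12, 13]:
    yield np.ones(length, dtype=np.int64)

dataset = tf.data.Dataset.from_generator(
    gen, tf.int64, output_shapes=tf.TensorShape([None]))
dataset = dataset.apply(
    tf.contrib.data.bucket_by_sequence_length(
        element_length_func=lambda x: tf.shape(x)[0],
        bucket_boundaries=[10],      # two buckets: length < 10 and >= 10
        bucket_batch_sizes=[2, 2]))  # one batch size per bucket
batch = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
  print(sess.run(batch).shape)  # e.g. (2, 5): lengths 3 and 5, padded to 5
  print(sess.run(batch).shape)  # e.g. (2, 13): lengths 12 and 13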
def NPairsLossMultilabel(pregrasp_embedding, goal_embedding,
                         postgrasp_embedding, grasp_success, params):
  """Uses npairs_loss_multilabel in both directions.

  Args:
    pregrasp_embedding: Batch of embeddings of the pregrasp image.
    goal_embedding: Batch of embeddings of the goal image.
    postgrasp_embedding: Batch of embeddings of the postgrasp image.
    grasp_success: Batch of 1s and 0s indicating grasp success.
    params: Parameters for the loss. Currently unused.

  Returns:
    A scalar loss.
  """
  del params
  # Compare the (pregrasp - postgrasp) difference against the goal embedding.
  pair_a = pregrasp_embedding - postgrasp_embedding
  pair_b = goal_embedding
  grasp_success = tf.cast(tf.squeeze(grasp_success), tf.int32)
  # Multiplying the batch index by grasp_success zeroes out the label index
  # for failed grasps; one-hot depth is batch_size + 1 to reserve class 0.
  range_tensor = (tf.range(pregrasp_embedding.shape[0],
                           dtype=tf.int32)) * grasp_success
  labels = tf.one_hot(range_tensor,
                      pregrasp_embedding.shape[0] + 1,
                      on_value=1,
                      off_value=0)
  sparse_labels = [
      contrib_layers.dense_to_sparse(labels[i])
      for i in range(labels.shape[0])
  ]
  loss_1 = contrib_losses.metric_learning.npairs_loss_multilabel(
      sparse_labels, pair_a, pair_b)
  loss_2 = contrib_losses.metric_learning.npairs_loss_multilabel(
      sparse_labels, pair_b, pair_a)

  tf.summary.scalar('npairs_loss1', loss_1)
  tf.summary.scalar('npairs_loss2', loss_2)
  return loss_1 + loss_2
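A hedged usage sketch for the loss above; the embeddings and success flags are random stand-ins, and contrib_layers / contrib_losses are assumed to be the tf.contrib modules of the same names:

import tensorflow as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import losses as contrib_losses

batch_size, embedding_dim = 4, 16
pregrasp = tf.random_normal([batch_size, embedding_dim])
goal = tf.random_normal([batch_size, embedding_dim])
postgrasp = tf.random_normal([batch_size, embedding_dim])
success = tf.constant([[1.0], [0.0], [1.0], [1.0]])  # one flag per example

loss = NPairsLossMultilabel(pregrasp, goal, postgrasp, success, params=None)

with tf.Session() as sess:
  print(sess.run(loss))  # a scalar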
def _indices_to_multihot(indices, vocab_size):
  """Converts [batch, n_labels] indices to a [batch, vocab_size] multihot.

  Indices can be padded with -1.

  Args:
    indices: Dense tensor of indices [batch, arbitrary_n_labels], padded with
      -1 if necessary.
    vocab_size: Integer vocabulary size.

  Returns:
    Multihot float32 tensor of shape [batch, vocab_size].
    E.g. [[0, 1], [2, -1]] with vocab_size 4 -> [[1, 1, 0, 0], [0, 0, 1, 0]].
  """
  if len(indices.shape) != 2:
    raise ValueError(
        'indices_to_multihot expects tensors of dimension 2, got shape %s'
        % indices.shape)

  # Treat -1 entries as padding and drop them during the sparse conversion.
  sparse_indices = contrib_layers.dense_to_sparse(indices, eos_token=-1)
  # Scatter the remaining indices into a boolean [batch, vocab_size] grid.
  multihot = tf.sparse.to_indicator(sparse_indices, vocab_size=vocab_size)
  return tf.cast(multihot, tf.float32)
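A quick check of the docstring example, assuming contrib_layers is tf.contrib.layers and a TF 1.x session:

import tensorflow as tf
from tensorflow.contrib import layers as contrib_layers

indices = tf.constant([[0, 1], [2, -1]], dtype=tf.int64)
multihot = _indices_to_multihot(indices, vocab_size=4)

with tf.Session() as sess:
  print(sess.run(multihot))
  # [[1. 1. 0. 0.]
  #  [0. 0. 1. 0.]]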
def _build_dataset():
  # min_len and max_len come from the enclosing test.
  input_data = [range(i + 1) for i in range(min_len, max_len)]

  def generator_fn():
    for record in input_data:
      yield record

  dataset = dataset_ops.Dataset.from_generator(
      generator=generator_fn,
      output_shapes=(tensor_shape.TensorShape([None])),
      output_types=(dtypes.int64))
  # eos_token=-1 so that legitimate 0 values are not treated as padding.
  dataset = dataset.map(lambda x: layers.dense_to_sparse(x, eos_token=-1))
  return dataset
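min_len and max_len are supplied by the enclosing test. Treating the helper as module-level with hypothetical min_len=2 and max_len=4, the dataset yields SparseTensors for [0, 1, 2] and [0, 1, 2, 3]:

import tensorflow as tf

min_len, max_len = 2, 4  # hypothetical values
dataset = _build_dataset()
sp = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
  first = sess.run(sp)
  print(first.values)       # [0 1 2] -- zeros survive because eos_token=-1
  print(first.dense_shape)  # [3]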
def _test_tuple_elements_by_padding(no_padding):
  # elements_gen and _element_length_fn come from the enclosing test.
  dataset = dataset_ops.Dataset.from_generator(
      generator=elements_gen,
      output_shapes=(tensor_shape.TensorShape([None]),
                     tensor_shape.TensorShape([])),
      output_types=(dtypes.int32, dtypes.int32))
  if no_padding:
    # Only the variable-length component is converted to sparse.
    dataset = dataset.map(lambda x, y: (layers.dense_to_sparse(x), y))
  dataset = dataset.apply(grouping.bucket_by_sequence_length(
      element_length_func=_element_length_fn,
      bucket_batch_sizes=[2, 2, 2],
      bucket_boundaries=[0, 8],
      no_padding=no_padding))
  shapes = dataset.output_shapes
  self.assertEqual([None, None], shapes[0].as_list())
  self.assertEqual([None], shapes[1].as_list())
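elements_gen is a fixture from the enclosing test. A hypothetical stand-in that matches the declared output_types and output_shapes ((int32, int32) and ([None], [])):

import numpy as np

def elements_gen():
  # Variable-length int32 sequences paired with a scalar int32 label.
  for length in [3, 5, 9, 12]:
    yield np.arange(length, dtype=np.int32), np.int32(length)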