Example No. 1
 def _to_batched_tensor_list(self, value):
     if self._dense_shape.merge_with(
             tensor_util.constant_value_as_shape(
                 value.dense_shape)).ndims == 0:
         raise ValueError(
             "Unbatching a sparse tensor is only supported for rank >= 1")
     return [
         sparse_ops.serialize_many_sparse(value, out_type=dtypes.variant)
     ]
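For context, a minimal self-contained sketch of the same batched round trip with the public TensorFlow 2.x APIs (tf.io.serialize_many_sparse / tf.io.deserialize_many_sparse); the variable names are illustrative, and the first dimension of the SparseTensor is treated as the minibatch dimension:

import tensorflow as tf

st = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 1], [2, 0]],
    values=["a", "b", "c"],
    dense_shape=[4, 5])

# One [3] string vector (serialized indices, values, shape) per batch row.
serialized = tf.io.serialize_many_sparse(st)
print(serialized.shape)  # (4, 3)

# Re-concatenate the rows back into a single rank-2 SparseTensor.
roundtrip = tf.io.deserialize_many_sparse(serialized, dtype=tf.string)
print(roundtrip.dense_shape.numpy())  # [4 5]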
Example No. 2
    def benchmarkVeryLarge2DFloatSparseTensor(self):
        np.random.seed(127)
        num_elements = 10000
        batch_size = 64
        indices_batch = np.random.randint(batch_size,
                                          size=num_elements,
                                          dtype=np.int64)
        indices_value = np.arange(num_elements, dtype=np.int64)
        indices = np.asarray(sorted(zip(indices_batch, indices_value)),
                             dtype=np.int64)
        values = ["feature_value_for_embedding_lookup"] * num_elements
        shape = np.asarray([batch_size, num_elements], dtype=np.int64)
        with session.Session(config=benchmark.benchmark_config()) as sess:
            with ops.device("/cpu:0"):
                indices = variables.Variable(indices)
                values = variables.Variable(values)
                shape = variables.Variable(shape)
                st = sparse_tensor_lib.SparseTensor(indices, values, shape)

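                # Path 1: round trip through the shared sparse tensors map (handle-based).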
                st_handles = add_many_sparse_to_tensors_map(st)
                st_roundtrip = take_many_sparse_from_tensors_map(
                    sparse_map_op=st_handles.op, sparse_handles=st_handles)
                st_roundtrip_op = st_roundtrip.values.op

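                # Path 2: round trip via serialize_many_sparse / deserialize_many_sparse.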
                st_serialized = sparse_ops.serialize_many_sparse(st)
                st_deserialized = sparse_ops.deserialize_many_sparse(
                    st_serialized, dtype=values.dtype)
                st_deserialized_op = st_deserialized.values.op

                variables.global_variables_initializer().run()

                st_roundtrip_values = self.evaluate(st_roundtrip)
                st_deserialized_values = self.evaluate(st_deserialized)
                np.testing.assert_equal(st_roundtrip_values.values,
                                        st_deserialized_values.values)
                np.testing.assert_equal(st_roundtrip_values.indices,
                                        st_deserialized_values.indices)
                np.testing.assert_equal(st_roundtrip_values.dense_shape,
                                        st_deserialized_values.dense_shape)

                self.run_op_benchmark(
                    sess,
                    st_roundtrip_op,
                    min_iters=2000,
                    name="benchmark_very_large_2d_float_st_tensor_maps")
                self.run_op_benchmark(
                    sess,
                    st_deserialized_op,
                    min_iters=2000,
                    name="benchmark_very_large_2d_float_st_serialization")
Example No. 3
  def benchmarkVeryLarge2DFloatSparseTensor(self):
    np.random.seed(127)
    num_elements = 10000
    batch_size = 64
    indices_batch = np.random.randint(
        batch_size, size=num_elements, dtype=np.int64)
    indices_value = np.arange(num_elements, dtype=np.int64)
    indices = np.asarray(
        sorted(zip(indices_batch, indices_value)), dtype=np.int64)
    values = ["feature_value_for_embedding_lookup"] * num_elements
    shape = np.asarray([batch_size, num_elements], dtype=np.int64)
    with session.Session(config=benchmark.benchmark_config()) as sess:
      with ops.device("/cpu:0"):
        indices = variables.Variable(indices)
        values = variables.Variable(values)
        shape = variables.Variable(shape)
        st = sparse_tensor_lib.SparseTensor(indices, values, shape)

        st_handles = add_many_sparse_to_tensors_map(st)
        st_roundtrip = take_many_sparse_from_tensors_map(
            sparse_map_op=st_handles.op, sparse_handles=st_handles)
        st_roundtrip_op = st_roundtrip.values.op

        st_serialized = sparse_ops.serialize_many_sparse(st)
        st_deserialized = sparse_ops.deserialize_many_sparse(
            st_serialized, dtype=values.dtype)
        st_deserialized_op = st_deserialized.values.op

        variables.global_variables_initializer().run()

        st_roundtrip_values = sess.run(st_roundtrip)
        st_deserialized_values = sess.run(st_deserialized)
        np.testing.assert_equal(st_roundtrip_values.values,
                                st_deserialized_values.values)
        np.testing.assert_equal(st_roundtrip_values.indices,
                                st_deserialized_values.indices)
        np.testing.assert_equal(st_roundtrip_values.dense_shape,
                                st_deserialized_values.dense_shape)

        self.run_op_benchmark(
            sess,
            st_roundtrip_op,
            min_iters=2000,
            name="benchmark_very_large_2d_float_st_tensor_maps")
        self.run_op_benchmark(
            sess,
            st_deserialized_op,
            min_iters=2000,
            name="benchmark_very_large_2d_float_st_serialization")
Example No. 4
def serialize_many_sparse_tensors(tensors):
    """Serializes many sparse tensors into a batch.

  Args:
    tensors: a tensor structure to serialize.

  Returns:
    `tensors` with any sparse tensors replaced by the serialized batch.
  """

    ret = nest.pack_sequence_as(tensors, [
        sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant)
        if sparse_tensor.is_sparse(tensor) else tensor
        for tensor in nest.flatten(tensors)
    ])
    return ret
Example No. 5
def serialize_many_sparse_tensors(tensors):
  """Serializes many sparse tensors into a batch.

  Args:
    tensors: a tensor structure to serialize.

  Returns:
    `tensors` with any sparse tensors replaced by the serialized batch.
  """

  ret = nest.pack_sequence_as(tensors, [
      sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant)
      if sparse_tensor.is_sparse(tensor) else tensor
      for tensor in nest.flatten(tensors)
  ])
  return ret
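Examples No. 4 and No. 5 above are the tf.data helper that flattens a nested structure, serializes every SparseTensor leaf, and repacks the structure. A rough sketch of the same pattern using only public TF 2.x APIs (tf.nest and tf.io.serialize_many_sparse); the helper name serialize_sparse_leaves is illustrative, not the library's:

import tensorflow as tf

def serialize_sparse_leaves(structure):
  """Replaces each SparseTensor leaf with its [N, 3] variant serialization."""
  flat = tf.nest.flatten(structure)
  serialized = [
      tf.io.serialize_many_sparse(t, out_type=tf.variant)
      if isinstance(t, tf.sparse.SparseTensor) else t
      for t in flat
  ]
  return tf.nest.pack_sequence_as(structure, serialized)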
Example No. 6
 def testSerializeManyDeserializeManyRoundTrip(self):
     with self.test_session(use_gpu=False) as sess:
         # N == 4 because shape_value == [4, 5]
         indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
         values_value = np.array([b"a", b"b", b"c"])
         shape_value = np.array([4, 5], dtype=np.int64)
         sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
         serialized = sparse_ops.serialize_many_sparse(sparse_tensor)
         deserialized = sparse_ops.deserialize_many_sparse(
             serialized, dtype=dtypes.string)
         serialized_value, deserialized_value = sess.run(
             [serialized, deserialized],
             feed_dict={
                 sparse_tensor.indices: indices_value,
                 sparse_tensor.values: values_value,
                 sparse_tensor.dense_shape: shape_value
             })
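         # serialize_many_sparse emits 3 serialized components (indices, values,
         # dense_shape) per batch row, so the result has shape [4, 3].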
         self.assertEqual(serialized_value.shape, (4, 3))
         self.assertAllEqual(deserialized_value.indices, indices_value)
         self.assertAllEqual(deserialized_value.values, values_value)
         self.assertAllEqual(deserialized_value.dense_shape, shape_value)
Example No. 7
  def testSerializeManyDeserializeManyRoundTrip(self):
   with self.test_session(use_gpu=False) as sess:
     # N == 4 because shape_value == [4, 5]
     indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
     values_value = np.array([b"a", b"b", b"c"])
     shape_value = np.array([4, 5], dtype=np.int64)
     sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
     serialized = sparse_ops.serialize_many_sparse(sparse_tensor)
     deserialized = sparse_ops.deserialize_many_sparse(
         serialized, dtype=dtypes.string)
     serialized_value, deserialized_value = sess.run(
         [serialized, deserialized],
         feed_dict={
             sparse_tensor.indices: indices_value,
             sparse_tensor.values: values_value,
             sparse_tensor.dense_shape: shape_value
         })
     self.assertEqual(serialized_value.shape, (4, 3))
     self.assertAllEqual(deserialized_value.indices, indices_value)
     self.assertAllEqual(deserialized_value.values, values_value)
     self.assertAllEqual(deserialized_value.dense_shape, shape_value)
Example No. 8
 def _maybe_serialize(t):
   if not isinstance(t, ops.SparseTensor):
     return t
   return (sparse_ops.serialize_many_sparse(t) if enqueue_many
           else sparse_ops.serialize_sparse(t))
Example No. 9
 def _maybe_serialize(t):
     if not isinstance(t, ops.SparseTensor):
         return t
     return (sparse_ops.serialize_many_sparse(t)
             if enqueue_many else sparse_ops.serialize_sparse(t))
Example No. 10
 def _maybe_serialize(t, is_sparse):
   if not is_sparse:
     return t
   return (sparse_ops.serialize_many_sparse(t) if enqueue_many
           else sparse_ops.serialize_sparse(t))
Example No. 11
 def _maybe_serialize(t, is_sparse):
     if not is_sparse:
         return t
     return (sparse_ops.serialize_many_sparse(t)
             if enqueue_many else sparse_ops.serialize_sparse(t))
Example No. 12
 def _to_batched_tensor_list(self, value):
   if self._dense_shape.merge_with(
       tensor_util.constant_value_as_shape(value.dense_shape)).ndims == 0:
     raise ValueError(
         "Unbatching a sparse tensor is only supported for rank >= 1")
   return [sparse_ops.serialize_many_sparse(value, out_type=dtypes.variant)]