Code example #1
  def _compare(self, sp_t, reduction_axes, ndims, keep_dims):
    densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()

    np_ans = densified
    if reduction_axes is None:
      np_ans = np.sum(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)

    with self.test_session():
      tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
                                                  keep_dims)
      out_dense = tf_dense_ans.eval()

      tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t, reduction_axes,
                                                          keep_dims)
      # Convert to dense for comparison purposes.
      out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans).eval()

    self.assertAllClose(np_ans, out_dense)
    self.assertAllClose(np_ans, out_sparse)
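
The snippet above densifies the sparse input and checks that sparse_reduce_sum matches a NumPy reduction over the dense copy. A minimal, self-contained sketch of that densify-then-compare idea, written against the public tf.sparse API of TensorFlow 2.x (an assumption; the snippets on this page use the internal sparse_ops module):

import numpy as np
import tensorflow as tf

# A 2x3 sparse tensor with two stored values.
sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                            values=[10.0, 20.0],
                            dense_shape=[2, 3])

# Densify: unspecified positions are filled with 0 by default.
dense = tf.sparse.to_dense(sp)

# Reducing the sparse tensor directly and reducing its dense copy must agree.
np.testing.assert_allclose(tf.sparse.reduce_sum(sp, axis=1).numpy(),
                           np.sum(dense.numpy(), axis=1))
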
Code example #2
File: trainer.py Project: yanssy/PoseFaceGAN
    def _load_batch_pair_pose(self, dataset):
        data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset, common_queue_capacity=32, common_queue_min=8)
        image_raw_0, image_raw_1, label, pose_0, pose_1, mask_0, mask_1 = data_provider.get([
            'image_raw_0', 'image_raw_1', 'label', 'pose_sparse_r4_0', 'pose_sparse_r4_1', 'pose_mask_r4_0', 'pose_mask_r4_1'])
        print("trainer--_load_batch_pair_pose:")
        print(pose_0)

        pose_0 = sparse_ops.sparse_tensor_to_dense(pose_0, default_value=0, validate_indices=False)
        pose_1 = sparse_ops.sparse_tensor_to_dense(pose_1, default_value=0, validate_indices=False)

        image_raw_0 = tf.reshape(image_raw_0, [128, 64, 3])        
        image_raw_1 = tf.reshape(image_raw_1, [128, 64, 3]) 
        pose_0 = tf.cast(tf.reshape(pose_0, [128, 64, self.keypoint_num]), tf.float32)  # cast to float32
        pose_1 = tf.cast(tf.reshape(pose_1, [128, 64, self.keypoint_num]), tf.float32)
        mask_0 = tf.cast(tf.reshape(mask_0, [128, 64, 1]), tf.float32)
        mask_1 = tf.cast(tf.reshape(mask_1, [128, 64, 1]), tf.float32)

        images_0, images_1, poses_0, poses_1, masks_0, masks_1 = tf.train.batch([image_raw_0, image_raw_1, pose_0, pose_1, mask_0, mask_1], 
                    batch_size=self.batch_size, num_threads=self.num_threads, capacity=self.capacityCoff * self.batch_size)

        images_0 = utils_wgan.process_image(tf.to_float(images_0), 127.5, 127.5)
        images_1 = utils_wgan.process_image(tf.to_float(images_1), 127.5, 127.5)
        poses_0 = poses_0*2-1
        poses_1 = poses_1*2-1
        return images_0, images_1, poses_0, poses_1, masks_0, masks_1
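
A note on the default_value=0, validate_indices=False arguments used above: validation rejects indices that are out of lexicographic order (or duplicated), which is common for sparse features coming straight out of a decoder. A small sketch, assuming the public tf.sparse.to_dense API, of what disabling validation permits:

import tensorflow as tf

# Indices are out of row-major order; with validate_indices=True this conversion
# raises an "out of order" error, so validation is disabled as in the code above.
sp = tf.sparse.SparseTensor(indices=[[1, 0], [0, 1]], values=[2, 1], dense_shape=[2, 2])
dense = tf.sparse.to_dense(sp, default_value=0, validate_indices=False)
print(dense.numpy())  # [[0 1]
                      #  [2 0]]
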
Code example #3
File: sparse_ops_test.py Project: jon-sch/tensorflow
  def testRandom(self):
    np.random.seed(1618)
    shapes = [(13,), (6, 8), (1, 7, 1)]
    for shape in shapes:
      for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
        a_np = np.random.randn(*shape).astype(dtype)
        b_np = np.random.randn(*shape).astype(dtype)
        sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
        sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)

        with self.test_session(use_gpu=False):
          maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
          maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              maximum_tf).eval()
          minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
          minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              minimum_tf).eval()

          a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
          b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()

        self.assertAllEqual(
            np.maximum(a_densified, b_densified), maximum_tf_densified)
        self.assertAllEqual(
            np.minimum(a_densified, b_densified), minimum_tf_densified)
Code example #4
    def tensors_to_item(self, keys_to_tensors):
        indices = keys_to_tensors[self._indices_key]
        values = keys_to_tensors[self._values_key]
        if self._shape_key:
            shape = keys_to_tensors[self._shape_key]
            if isinstance(shape, sparse_tensor.SparseTensor):
                shape = sparse_ops.sparse_tensor_to_dense(shape)
        elif self._shape:
            shape = self._shape
        else:
            shape = indices.dense_shape
        indices_shape = array_ops.shape(indices.indices)
        rank = indices_shape[1]
        ids = math_ops.to_int64(indices.values)
        indices_columns_to_preserve = array_ops.slice(
            indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
        new_indices = array_ops.concat_v2(
            [indices_columns_to_preserve,
             array_ops.reshape(ids, [-1, 1])], 1)

        tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
        if self._densify:
            tensor = sparse_ops.sparse_tensor_to_dense(tensor,
                                                       self._default_value)
        return tensor
Code example #6
File: transform.py Project: nininininini/tensorx
def dense_put(tensor, sp_updates, name="dense_put"):
    """ Changes a given dense ``Tensor`` according to the updates specified in a ``SparseTensor``.

    Creates a new ``Tensor`` where the values of the updates override the
    values in the original tensor. The tensor `shape` must be the same as the updates `dense_shape`.

    Args:
        tensor: a ``Tensor`` we want to change.
        sp_updates: a ``SparseTensor`` with the indices to be changed and the respective values.
        name: the name for this operation (optional).

    Returns:
        ``Tensor``: a ``Tensor`` with the updated values.
    """
    with ops.name_scope(name):
        tensor = ops.convert_to_tensor(tensor)
        if sp_updates.dtype != tensor.dtype:
            sp_updates = math_ops.cast(sp_updates, tensor.dtype)

        markers = array_ops.ones(shape=array_ops.shape(sp_updates.values))
        sparse_marker_tensor = SparseTensor(indices=sp_updates.indices,
                                            values=markers,
                                            dense_shape=sp_updates.dense_shape)
        dense_update_marker = sp_ops.sparse_tensor_to_dense(
            sparse_marker_tensor)
        dense_updates = sp_ops.sparse_tensor_to_dense(sp_updates)

        new_tensor = array_ops.where(
            math_ops.not_equal(dense_update_marker, 0), dense_updates, tensor)
        return new_tensor
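
To make the behaviour of dense_put concrete, here is a hedged usage sketch of the same marker/where pattern rebuilt on the public tf.* API (the helper name dense_put_sketch is illustrative, not part of tensorx):

import tensorflow as tf

def dense_put_sketch(tensor, sp_updates):
    # Cast updates to the target dtype, mark the updated positions, and let
    # tf.where take the update wherever a marker exists and the original elsewhere.
    tensor = tf.convert_to_tensor(tensor)
    values = tf.cast(sp_updates.values, tensor.dtype)
    updates = tf.sparse.SparseTensor(sp_updates.indices, values, sp_updates.dense_shape)
    marker = tf.sparse.SparseTensor(sp_updates.indices, tf.ones_like(values), sp_updates.dense_shape)
    return tf.where(tf.sparse.to_dense(marker) != 0,
                    tf.sparse.to_dense(updates), tensor)

x = tf.zeros([2, 2])
updates = tf.sparse.SparseTensor(indices=[[0, 0], [1, 1]], values=[4, 2], dense_shape=[2, 2])
print(dense_put_sketch(x, updates).numpy())  # [[4. 0.]
                                             #  [0. 2.]]
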
Code example #7
File: data_ops.py Project: LUTAN/tensorflow
def ParseLabelTensorOrDict(labels):
  """Return a tensor to use for input labels to tensor_forest.

  The incoming targets can be a dict where keys are the string names of the
  columns, which we turn into a single 1-D tensor for classification or
  2-D tensor for regression.

  Converts sparse tensors to dense ones.

  Args:
    labels: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    A 2-D tensor for labels/outputs.
  """
  if isinstance(labels, dict):
    return math_ops.to_float(
        array_ops.concat(
            [
                sparse_ops.sparse_tensor_to_dense(
                    labels[k], default_value=-1) if isinstance(
                        labels[k], sparse_tensor.SparseTensor) else labels[k]
                for k in sorted(labels.keys())
            ],
            1))
  else:
    if isinstance(labels, sparse_tensor.SparseTensor):
      return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
          labels, default_value=-1))
    else:
      return math_ops.to_float(labels)
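
The default_value=-1 used here is the conventional placeholder for a missing label. A small sketch of what that branch produces for a single sparse label column (the toy tensor below is hypothetical):

import tensorflow as tf

# One label per row; row 1 has no stored value and is filled with -1.
labels = tf.sparse.SparseTensor(indices=[[0, 0], [2, 0]], values=[3, 7], dense_shape=[3, 1])
dense_labels = tf.cast(tf.sparse.to_dense(labels, default_value=-1), tf.float32)
print(dense_labels.numpy())  # [[ 3.]
                             #  [-1.]
                             #  [ 7.]]
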
Code example #8
    def testRandom(self):
        np.random.seed(1618)
        shapes = [(13, ), (6, 8), (1, 7, 1)]
        for shape in shapes:
            for dtype in [
                    np.int32, np.int64, np.float16, np.float32, np.float64
            ]:
                a_np = np.random.randn(*shape).astype(dtype)
                b_np = np.random.randn(*shape).astype(dtype)
                sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
                sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)

                with self.cached_session(use_gpu=False):
                    maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
                    maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
                        maximum_tf).eval()
                    minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
                    minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
                        minimum_tf).eval()

                    a_densified = sparse_ops.sparse_tensor_to_dense(
                        sp_a).eval()
                    b_densified = sparse_ops.sparse_tensor_to_dense(
                        sp_b).eval()

                self.assertAllEqual(np.maximum(a_densified, b_densified),
                                    maximum_tf_densified)
                self.assertAllEqual(np.minimum(a_densified, b_densified),
                                    minimum_tf_densified)
Code example #10
 def testPrintSparseTensorPassthrough(self):
   a = sparse_tensor.SparseTensor(
       indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
   b = sparse_tensor.SparseTensor(
       indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
   a = prettyprint_ops.print_op(a)
   with self.test_session():
     self.assertAllEqual(
         sparse_ops.sparse_tensor_to_dense(a).eval(),
         sparse_ops.sparse_tensor_to_dense(b).eval())
Code example #11
 def sparse_to_dense_computation(inp, weight):
     if weight is None:
         weight = sparse_tensor.SparseTensor(
             inp.indices,
             array_ops.ones_like(inp.values, dtype=dtypes.float32),
             dense_shape=inp.dense_shape)
     # Pad the sparse tensor to be dense tensor.
     inp = sparse_ops.sparse_tensor_to_dense(inp)
     weight = sparse_ops.sparse_tensor_to_dense(weight)
     return inp, weight
Code example #12
    def testDenseSequencesToSparse(self):
        labels = [[1, 3, 3, 3, 0], [1, 4, 4, 4, 0], [4, 2, 2, 9, 4]]
        length = [4, 5, 5]
        sparse = ctc_ops.dense_labels_to_sparse(labels, length)
        new_dense = sparse_ops.sparse_tensor_to_dense(sparse)

        self.assertAllEqual(labels, new_dense)

        padded_labels = [[1, 3, 3, 3, 0, 0, 0, 0], [1, 4, 4, 4, 0, 0, 0, 0],
                         [4, 2, 2, 9, 4, 0, 0, 0]]
        length = [4, 5, 5]
        sparse = ctc_ops.dense_labels_to_sparse(padded_labels, length)
        padded_dense = sparse_ops.sparse_tensor_to_dense(sparse)

        self.assertAllEqual(padded_dense, new_dense)
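
A related round trip with the public API (assumed here instead of the internal ctc_ops helper): tf.sparse.from_dense drops the zeros and tf.sparse.to_dense restores them, so zero padding survives the conversion unchanged:

import tensorflow as tf

labels = tf.constant([[1, 3, 3, 3, 0], [1, 4, 4, 4, 0], [4, 2, 2, 9, 4]])
sparse = tf.sparse.from_dense(labels)   # zeros become implicit
restored = tf.sparse.to_dense(sparse)   # implicit entries come back as 0
assert bool(tf.reduce_all(restored == labels))
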
Code example #13
 def tensors_to_item(self, keys_to_tensors):
     tensor = keys_to_tensors[self._tensor_key]
     shape = self._shape
     if self._shape_key:
         shape = keys_to_tensors[self._shape_key]
         if isinstance(shape, ops.SparseTensor):
             shape = sparse_ops.sparse_tensor_to_dense(shape)
     if isinstance(tensor, ops.SparseTensor):
         if shape is not None:
             tensor = sparse_ops.sparse_reshape(tensor, shape)
         tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
     else:
         if shape is not None:
             tensor = array_ops.reshape(tensor, shape)
     return tensor
Code example #14
    def test_dense_output(self):
        dense_inputs = ops.convert_to_tensor_v2_with_dispatch(
            np.random.uniform(size=(10, 10)).astype('f'))
        # Create some sparse data where multiple rows and columns are missing.
        sparse_inputs = sparse_tensor.SparseTensor(
            indices=np.random.randint(low=0, high=10, size=(5, 2)),
            values=np.random.uniform(size=(5, )).astype('f'),
            dense_shape=[10, 10])
        sparse_inputs = sparse_ops.sparse_reorder(sparse_inputs)

        layer = keras.layers.Dense(
            5,
            kernel_initializer=keras.initializers.RandomUniform(),
            bias_initializer=keras.initializers.RandomUniform(),
            dtype='float32')
        dense_outputs = layer(dense_inputs)
        sparse_outputs = layer(sparse_inputs)

        expected_dense = math_ops.add(
            math_ops.matmul(dense_inputs,
                            keras.backend.get_value(layer.kernel)),
            keras.backend.get_value(layer.bias))
        expected_sparse = math_ops.add(
            math_ops.matmul(sparse_ops.sparse_tensor_to_dense(sparse_inputs),
                            keras.backend.get_value(layer.kernel)),
            keras.backend.get_value(layer.bias))

        self.assertAllClose(dense_outputs, expected_dense)
        self.assertAllClose(sparse_outputs, expected_sparse)
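
The sparse branch of this test relies on a Dense layer treating a SparseTensor input like its densified form. A minimal check of that equivalence at the matmul level, using public ops only (a sketch, not the layer's actual code path):

import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 1], [1, 0]], values=[2.0, 3.0], dense_shape=[2, 2])
kernel = tf.constant([[1.0, 10.0], [100.0, 1000.0]])

lhs = tf.sparse.sparse_dense_matmul(sp, kernel)   # sparse input times dense kernel
rhs = tf.matmul(tf.sparse.to_dense(sp), kernel)   # densify first, then matmul
tf.debugging.assert_near(lhs, rhs)
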
Code example #15
File: sparse_ops_test.py Project: Wajih-O/tensorflow
  def _testSparseReduceShape(self, sp_t, reduction_axes, ndims, keep_dims,
                             do_sum):
    densified = self.evaluate(sparse_ops.sparse_tensor_to_dense(sp_t))

    np_op = np.sum
    tf_op = sparse_ops.sparse_reduce_sum
    if not do_sum:
      np_op = np.max
      tf_op = sparse_ops.sparse_reduce_max

    np_ans = densified
    if reduction_axes is None:
      np_ans = np_op(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np_op(np_ans, axis=ra, keepdims=keep_dims)

    tf_ans = tf_op(sp_t, reduction_axes, keep_dims)
    self.assertAllEqual(np_ans.shape, tf_ans.get_shape().as_list())
Code example #16
File: layers.py Project: nininininini/tensorx
    def __init__(self, layer, mean=0.0, stddev=0.2, seed=None):
        super().__init__(input_layers=layer,
                         n_units=layer.n_units,
                         shape=layer.shape,
                         dtype=layer.dtype,
                         name=layer.name + "_gaussian_noise")

        self.mean = mean
        self.stddev = stddev
        self.seed = seed

        with name_scope(self.name):
            if layer.is_sparse():
                self.tensor = sparse_ops.sparse_tensor_to_dense(layer.tensor)
            else:
                self.tensor = layer.tensor

            noise_shape = array_ops.shape(self.tensor)
            noise = random_ops.random_normal(noise_shape,
                                             mean,
                                             stddev,
                                             seed=seed,
                                             dtype=dtypes.float32)

            self.tensor = math_ops.cast(self.tensor, dtypes.float32)
            self.tensor = math_ops.add(self.tensor, noise)
Code example #17
    def get_sequence_dense_tensor(self, transformation_cache, state_manager):
        """Returns a `TensorSequenceLengthPair`.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
        sp_tensor = transformation_cache.get(self, state_manager)
        dense_tensor = sparse_ops.sparse_tensor_to_dense(
            sp_tensor, default_value=self.default_value)
        # Reshape into [batch_size, T, variable_shape].
        dense_shape = array_ops.concat(
            [array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape],
            axis=0)
        dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)

        # Get the number of timesteps per example
        # For the 2D case, the raw values are grouped according to num_elements;
        # for the 3D case, the grouping happens in the third dimension, and
        # sequence length is not affected.
        if sp_tensor.shape.ndims == 2:
            num_elements = self.variable_shape.num_elements()
        else:
            num_elements = 1
        seq_length = fc_utils.sequence_length_from_sparse_tensor(
            sp_tensor, num_elements=num_elements)

        return fc.SequenceDenseColumn.TensorSequenceLengthPair(
            dense_tensor=dense_tensor, sequence_length=seq_length)
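
A compact illustration of the reshape step above, assuming a variable_shape of [1]: the densified sequence values are reshaped into [batch_size, T, 1] with T inferred via -1:

import tensorflow as tf

# Two examples, each with up to two sequence steps stored sparsely.
sp = tf.sparse.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                            values=[1.0, 2.0, 3.0], dense_shape=[2, 2])
dense = tf.sparse.to_dense(sp, default_value=0.0)

# Reshape into [batch_size, T, variable_shape], with variable_shape assumed to be [1].
dense_shape = tf.concat([tf.shape(dense)[:1], [-1], [1]], axis=0)
dense = tf.reshape(dense, dense_shape)
print(dense.shape)  # (2, 2, 1)
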
Code example #18
 def tensors_to_item(self, keys_to_tensors):
     tensor = keys_to_tensors[self._tensor_key]
     shape = self._shape
     if self._shape_key:
         shape = keys_to_tensors[self._shape_key]
         if isinstance(shape, ops.SparseTensor):
             shape = sparse_ops.sparse_tensor_to_dense(shape)
     if isinstance(tensor, ops.SparseTensor):
         if shape is not None:
             tensor = sparse_ops.sparse_reshape(tensor, shape)
         tensor = sparse_ops.sparse_tensor_to_dense(tensor,
                                                    self._default_value)
     else:
         if shape is not None:
             tensor = array_ops.reshape(tensor, shape)
     return tensor
Code example #19
File: categorical.py Project: ubiquity6/tensorflow
 def call(self, inputs):
   # (b/144500510) ragged.map_flat_values(sparse_cross_hashed, inputs) will
   # cause kernel failure. Investigate and find a more efficient implementation
   if all([ragged_tensor.is_ragged(inp) for inp in inputs]):
     inputs = [inp.to_sparse() if ragged_tensor.is_ragged(inp) else inp
               for inp in inputs]
     if self.num_bins is not None:
       output = sparse_ops.sparse_cross_hashed(
           inputs, num_buckets=self.num_bins)
     else:
       output = sparse_ops.sparse_cross(inputs)
     return ragged_tensor.RaggedTensor.from_sparse(output)
   if any([ragged_tensor.is_ragged(inp) for inp in inputs]):
     raise ValueError('Inputs must be either all `RaggedTensor`, or none of '
                      'them should be `RaggedTensor`, got {}'.format(inputs))
   sparse_output = False
   if any([isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs]):
     sparse_output = True
   if self.num_bins is not None:
     output = sparse_ops.sparse_cross_hashed(
         inputs, num_buckets=self.num_bins)
   else:
     output = sparse_ops.sparse_cross(inputs)
   if not sparse_output:
     output = sparse_ops.sparse_tensor_to_dense(output)
   return output
Code example #20
 def new_model_fn(features, labels, mode, config):  # pylint: disable=missing-docstring
     spec = estimator.model_fn(features, labels, mode, config)
     predictions = spec.predictions
     if predictions is None:
         return spec
     verify_keys_and_predictions(features, predictions)
     for key in get_keys(features):
         feature = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
             features[key])
         if sparse_default_values and (key in sparse_default_values):
             if not isinstance(feature, sparse_tensor_lib.SparseTensor):
                 raise ValueError(
                     'Feature ({}) is expected to be a `SparseTensor`.'.
                     format(key))
             feature = sparse_ops.sparse_tensor_to_dense(
                 feature, default_value=sparse_default_values[key])
         if not isinstance(feature, ops.Tensor):
             raise ValueError(
                 'Feature ({}) should be a Tensor. Please use `keys` '
                  'argument of forward_features to filter unwanted features, or '
                  'add key to argument `sparse_default_values`. '
                 'Type of features[{}] is {}.'.format(
                     key, key, type(feature)))
         predictions[key] = feature
     spec = spec._replace(predictions=predictions)
     if spec.export_outputs:  # CHANGES HERE
         outputs = spec.export_outputs['predict'].outputs
         outputs[key] = spec.predictions[key]
         spec.export_outputs['predict'] = tf.estimator.export.PredictOutput(
             outputs)
         spec.export_outputs[
             'serving_default'] = tf.estimator.export.PredictOutput(outputs)
     return spec
Code example #21
File: extenders.py Project: AnishShah/tensorflow
  def new_model_fn(features, labels, mode, config):  # pylint: disable=missing-docstring
    spec = estimator.model_fn(features, labels, mode, config)
    predictions = spec.predictions
    if predictions is None:
      return spec
    verify_keys_and_predictions(features, predictions)
    for key in get_keys(features):
      feature = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
          features[key])
      if sparse_default_values and (key in sparse_default_values):
        if not isinstance(feature, sparse_tensor_lib.SparseTensor):
          raise ValueError(
              'Feature ({}) is expected to be a `SparseTensor`.'.format(key))
        feature = sparse_ops.sparse_tensor_to_dense(
            feature, default_value=sparse_default_values[key])
      if not isinstance(feature, ops.Tensor):
        raise ValueError(
            'Feature ({}) should be a Tensor. Please use `keys` '
            'argument of forward_features to filter unwanted features, or '
            'add key to argument `sparse_default_values`. '
            'Type of features[{}] is {}.'.format(key, key, type(feature)))
      predictions[key] = feature
    spec = spec._replace(predictions=predictions)
    if spec.export_outputs:
      for ekey in ['predict', 'serving_default']:
        if (ekey in spec.export_outputs and
            isinstance(spec.export_outputs[ekey],
                       PredictOutput)):
          export_outputs = spec.export_outputs[ekey].outputs
          for key in get_keys(features):
            export_outputs[key] = predictions[key]

    return spec
Code example #22
    def _get_sequence_dense_tensor(self,
                                   inputs,
                                   weight_collections=None,
                                   trainable=None):
        # Do nothing with weight_collections and trainable since no variables are
        # created in this function.
        del weight_collections
        del trainable
        sp_tensor = inputs.get(self)
        dense_tensor = sparse_ops.sparse_tensor_to_dense(
            sp_tensor, default_value=self.default_value)
        # Reshape into [batch_size, T, variable_shape].
        dense_shape = array_ops.concat(
            [array_ops.shape(dense_tensor)[:1], [-1], self._variable_shape],
            axis=0)
        dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)

        # Get the number of timesteps per example
        # For the 2D case, the raw values are grouped according to num_elements;
        # for the 3D case, the grouping happens in the third dimension, and
        # sequence length is not affected.
        num_elements = (self._variable_shape.num_elements()
                        if sp_tensor.shape.ndims == 2 else 1)
        seq_length = fc_utils.sequence_length_from_sparse_tensor(
            sp_tensor, num_elements=num_elements)

        return fc._SequenceDenseColumn.TensorSequenceLengthPair(
            dense_tensor=dense_tensor, sequence_length=seq_length)
Code example #23
  def _get_sequence_dense_tensor(
      self, inputs, weight_collections=None, trainable=None):
    # Do nothing with weight_collections and trainable since no variables are
    # created in this function.
    del weight_collections
    del trainable
    sp_tensor = inputs.get(self)
    dense_tensor = sparse_ops.sparse_tensor_to_dense(
        sp_tensor, default_value=self.default_value)
    # Reshape into [batch_size, T, variable_shape].
    dense_shape = array_ops.concat(
        [array_ops.shape(dense_tensor)[:1], [-1], self._variable_shape],
        axis=0)
    dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)

    # Get the number of timesteps per example
    # For the 2D case, the raw values are grouped according to num_elements;
    # for the 3D case, the grouping happens in the third dimension, and
    # sequence length is not affected.
    num_elements = (self._variable_shape.num_elements()
                    if sp_tensor.shape.ndims == 2 else 1)
    seq_length = fc._sequence_length_from_sparse_tensor(
        sp_tensor, num_elements=num_elements)

    return fc._SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=seq_length)
Code example #24
  def call(self, inputs):
    self._called = True
    if self._max_tokens is None:
      out_depth = K.get_value(self.num_elements)
    else:
      out_depth = self._max_tokens

    # If the input is a sparse tensor, we densify it with the default value of
    # -1. Because -1 is ignored by one_hot, this effectively drops the non-set
    # positions from the output encoding.
    if isinstance(inputs, sparse_tensor.SparseTensor):
      inputs = sparse_ops.sparse_tensor_to_dense(inputs, default_value=-1)

    if self._output_mode == BINARY:
      bool_one_hot_data = array_ops.one_hot(
          inputs, depth=out_depth, on_value=True, off_value=False)
      reduced_bool_data = math_ops.reduce_any(bool_one_hot_data, axis=1)
      binary_data = math_ops.cast(reduced_bool_data, dtypes.int64)
      binary_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
      return binary_data

    one_hot_data = array_ops.one_hot(inputs, depth=out_depth)
    counts = math_ops.reduce_sum(one_hot_data, axis=1)
    if self._output_mode == COUNT:
      count_data = math_ops.cast(counts, dtypes.int64)
      count_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
      return count_data

    tf_idf_data = math_ops.multiply(counts, self.tf_idf_weights)
    tf_idf_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
    if self._output_mode == TFIDF:
      return tf_idf_data

    # We can only get here if we didn't recognize the passed mode.
    raise ValueError("Unknown output mode %s" % self._output_mode)
Code example #25
def convert_to_list(values, sparse_default_value=None):
  """Convert a TensorLike, CompositeTensor, or ndarray into a Python list."""
  if tf_utils.is_ragged(values):
    # There is a corner case when dealing with ragged tensors: if you get an
    # actual RaggedTensor (not a RaggedTensorValue) passed in non-eager mode,
    # you can't call to_list() on it without evaluating it first. However,
    # because we don't yet fully support composite tensors across Keras,
    # K.get_value() won't evaluate the tensor.
    # TODO(momernick): Get Keras to recognize composite tensors as Tensors
    # and then replace this with a call to K.get_value.
    if (isinstance(values, ragged_tensor.RaggedTensor) and
        not context.executing_eagerly()):
      values = K.get_session(values).run(values)
    values = values.to_list()

  if isinstance(values,
                (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    if sparse_default_value is None:
      if dtypes.as_dtype(values.values.dtype) == dtypes.string:
        sparse_default_value = ''
      else:
        sparse_default_value = -1
    dense_tensor = sparse_ops.sparse_tensor_to_dense(
        values, default_value=sparse_default_value)
    values = K.get_value(dense_tensor)

  if isinstance(values, ops.Tensor):
    values = K.get_value(values)

  # We may get passed a ndarray or the code above may give us a ndarray.
  # In either case, we want to force it into a standard python list.
  if isinstance(values, np.ndarray):
    values = values.tolist()

  return values
Code example #26
    def testCwiseDivAndMul(self):
        np.random.seed(1618)
        sp_shapes = [(10, 10, 10), (5, 5), (1618, ), (3, 3, 7)]
        dense_shapes = [(10, 10, 1), (5, 5), (1, ), (1, 7)]

        with test_util.force_cpu():
            for dtype in [np.float32, np.float64, np.int32, np.int64]:
                for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
                    sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
                    dense_vals_np = np.random.rand(
                        *dense_shape).astype(dtype) + 1
                    sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
                    sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t)
                    dense_t = constant_op.constant(dense_vals_np)

                    self._check(sp_t / dense_t, sp_t_densified / dense_vals_np,
                                sp_t)
                    # Check commutative.
                    self._check(sp_t * dense_t, sp_t_densified * dense_vals_np,
                                sp_t)
                    self._check(dense_t * sp_t, sp_t_densified * dense_vals_np,
                                sp_t)

                    if dtype in [np.int32, np.int64]:
                        res = sp_t / dense_t  # should invoke "__truediv__"
                        self.assertEqual(res.values.dtype, np.float64)
Code example #27
    def call(self, inputs, count_weights=None):
        if isinstance(inputs, (list, np.ndarray)):
            inputs = ops.convert_to_tensor_v2(inputs)
        if inputs.shape.rank == 1:
            inputs = array_ops.expand_dims(inputs, 1)

        if count_weights is not None and self._output_mode != COUNT:
            raise ValueError(
                "count_weights is not used in `output_mode='tf-idf'`, "
                "or `output_mode='binary'`. Please pass a single input.")
        self._called = True
        if self._max_tokens is None:
            out_depth = K.get_value(self.num_elements)
            if out_depth == 0:
                raise RuntimeError(
                    "If you construct a `CategoryEncoding` layer with "
                    "`max_tokens=None`, you need to call `adapt()` "
                    "on it before using it")
        else:
            out_depth = self._max_tokens

        if self._output_mode == TFIDF:
            # If the input is a sparse tensor, we densify it with the default value of
            # -1. Because -1 is ignored by one_hot, this effectively drops the non-set
            # positions from the output encoding.
            if self._sparse:
                raise ValueError("`sparse=True` with `output_mode=tfidf` "
                                 "is not supported.")
            if isinstance(inputs, sparse_tensor.SparseTensor):
                inputs = sparse_ops.sparse_tensor_to_dense(inputs,
                                                           default_value=-1)
            one_hot_data = array_ops.one_hot(inputs, depth=out_depth)
            counts = math_ops.reduce_sum(one_hot_data, axis=1)
            tf_idf_data = math_ops.multiply(counts, self.tf_idf_weights)
            tf_idf_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
            return tf_idf_data

        binary_output = (self._output_mode == BINARY)
        if self._sparse:
            result = bincount_ops.sparse_bincount(inputs,
                                                  weights=count_weights,
                                                  minlength=out_depth,
                                                  axis=-1,
                                                  binary_output=binary_output)
            result = math_ops.cast(result, K.floatx())
            batch_size = array_ops.shape(result)[0]
            result = sparse_tensor.SparseTensor(
                indices=result.indices,
                values=result.values,
                dense_shape=[batch_size, out_depth])
            return result
        else:
            result = bincount_ops.bincount(inputs,
                                           weights=count_weights,
                                           minlength=out_depth,
                                           dtype=K.floatx(),
                                           axis=-1,
                                           binary_output=binary_output)
            result.set_shape(tensor_shape.TensorShape((None, out_depth)))
            return result
Code example #28
  def call(self, inputs):
    self._called = True
    if self._max_tokens is None:
      out_depth = K.get_value(self.num_elements)
    else:
      out_depth = self._max_tokens

    if self._output_mode == TFIDF:
      # If the input is a sparse tensor, we densify it with the default value of
      # -1. Because -1 is ignored by one_hot, this effectively drops the non-set
      # positions from the output encoding.
      if isinstance(inputs, sparse_tensor.SparseTensor):
        inputs = sparse_ops.sparse_tensor_to_dense(inputs, default_value=-1)
      one_hot_data = array_ops.one_hot(inputs, depth=out_depth)
      counts = math_ops.reduce_sum(one_hot_data, axis=1)
      tf_idf_data = math_ops.multiply(counts, self.tf_idf_weights)
      tf_idf_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
      return tf_idf_data

    binary_output = (self._output_mode == BINARY)
    if self._sparse:
      return bincount_ops.sparse_bincount(
          inputs, minlength=out_depth, axis=-1, binary_output=binary_output)
    else:
      result = bincount_ops.bincount(
          inputs,
          minlength=out_depth,
          dtype=dtypes.int64,
          axis=-1,
          binary_output=binary_output)
      result.set_shape(tensor_shape.TensorShape((None, out_depth)))
      return result
Code example #29
    def tensors_to_item(self, keys_to_tensors):
        """Maps the given dictionary of tensors to a concatenated list of keypoints.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [time, num_keypoints, 2] tensor of keypoint coordinates, in order [y, x].
          Whether the tensor is a SparseTensor or a dense Tensor is determined
          by the return_dense parameter. Empty positions in the sparse tensor
          are filled with -1.0 values.
    """
        coordinates = []
        for key in self._full_keys:
            value = keys_to_tensors[key]
            expanded_dims = array_ops.concat([
                math_ops.to_int64(array_ops.shape(value)),
                constant_op.constant([1], dtype=dtypes.int64)
            ], 0)
            coordinate = sparse_ops.sparse_reshape(value, expanded_dims)
            coordinates.append(coordinate)
        keypoints = sparse_ops.sparse_concat(2, coordinates)
        if self._return_dense:
            keypoints = sparse_ops.sparse_tensor_to_dense(
                keypoints, default_value=self._default_value)
        return keypoints
Code example #30
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `TensorSequenceLengthPair`.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
    sp_tensor = transformation_cache.get(self, state_manager)
    dense_tensor = sparse_ops.sparse_tensor_to_dense(
        sp_tensor, default_value=self.default_value)
    # Reshape into [batch_size, T, variable_shape].
    dense_shape = array_ops.concat(
        [array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape],
        axis=0)
    dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)

    # Get the number of timesteps per example
    # For the 2D case, the raw values are grouped according to num_elements;
    # for the 3D case, the grouping happens in the third dimension, and
    # sequence length is not affected.
    num_elements = (self.variable_shape.num_elements()
                    if sp_tensor.shape.ndims == 2 else 1)
    seq_length = fc_old._sequence_length_from_sparse_tensor(
        sp_tensor, num_elements=num_elements)

    return fc.SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=seq_length)
Code example #31
    def test_dense_input_sparse_output(self):
        input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])

        # The expected output should be (X for missing value):
        # [[X, 1, 1, 1, X, X]
        #  [1, X, X, 2, X, X]]
        expected_indices = [[0, 1], [0, 2], [0, 3], [1, 0], [1, 3]]
        expected_values = [1, 1, 1, 1, 2]
        max_tokens = 6

        input_data = keras.Input(shape=(None, ), dtype=dtypes.int32)
        layer = get_layer_class()(max_tokens=max_tokens,
                                  output_mode=categorical_encoding.COUNT,
                                  sparse=True)
        int_data = layer(input_data)

        model = keras.Model(inputs=input_data, outputs=int_data)
        sp_output_dataset = model.predict(input_array, steps=1)
        self.assertAllEqual(expected_values, sp_output_dataset.values)
        self.assertAllEqual(expected_indices, sp_output_dataset.indices)

        # Assert sparse output is same as dense output.
        layer = get_layer_class()(max_tokens=max_tokens,
                                  output_mode=categorical_encoding.COUNT,
                                  sparse=False)
        int_data = layer(input_data)
        model = keras.Model(inputs=input_data, outputs=int_data)
        output_dataset = model.predict(input_array, steps=1)
        self.assertAllEqual(
            sparse_ops.sparse_tensor_to_dense(sp_output_dataset,
                                              default_value=0), output_dataset)
Code example #32
 def test_crossing_sparse_inputs_depth_tuple(self):
     layer = category_crossing.CategoryCrossing(depth=(2, 3))
     inputs_0 = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 0], [2, 0]],
                                           values=['a', 'b', 'c'],
                                           dense_shape=[3, 1])
     inputs_1 = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 0], [2, 0]],
                                           values=['d', 'e', 'f'],
                                           dense_shape=[3, 1])
     inputs_2 = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 0], [2, 0]],
                                           values=['g', 'h', 'i'],
                                           dense_shape=[3, 1])
     inp_0_t = input_layer.Input(shape=(1, ),
                                 sparse=True,
                                 dtype=dtypes.string)
     inp_1_t = input_layer.Input(shape=(1, ),
                                 sparse=True,
                                 dtype=dtypes.string)
     inp_2_t = input_layer.Input(shape=(1, ),
                                 sparse=True,
                                 dtype=dtypes.string)
     out_t = layer([inp_0_t, inp_1_t, inp_2_t])
     model = training.Model([inp_0_t, inp_1_t, inp_2_t], out_t)
     output = model.predict([inputs_0, inputs_1, inputs_2])
     self.assertIsInstance(output, sparse_tensor.SparseTensor)
     output = sparse_ops.sparse_tensor_to_dense(output)
     expected_outputs_0 = [[b'a_X_d', b'a_X_g', b'd_X_g', b'a_X_d_X_g']]
     expected_outputs_1 = [[b'b_X_e', b'b_X_h', b'e_X_h', b'b_X_e_X_h']]
     expected_outputs_2 = [[b'c_X_f', b'c_X_i', b'f_X_i', b'c_X_f_X_i']]
     expected_out = array_ops.concat(
         [expected_outputs_0, expected_outputs_1, expected_outputs_2],
         axis=0)
     self.assertAllEqual(expected_out, output)
Code example #33
    def _testSparseReduceShape(self, sp_t, reduction_axes, ndims, keep_dims,
                               do_sum):
        densified = self.evaluate(sparse_ops.sparse_tensor_to_dense(sp_t))

        np_op = np.sum
        tf_op = sparse_ops.sparse_reduce_sum
        if not do_sum:
            np_op = np.max
            tf_op = sparse_ops.sparse_reduce_max

        np_ans = densified
        if reduction_axes is None:
            np_ans = np_op(np_ans, keepdims=keep_dims)
        else:
            if not isinstance(reduction_axes, list):  # Single scalar.
                reduction_axes = [reduction_axes]
            reduction_axes = np.array(reduction_axes).astype(np.int32)
            # Handles negative axes.
            reduction_axes = (reduction_axes + ndims) % ndims
            # Loop below depends on sorted.
            reduction_axes.sort()
            for ra in reduction_axes.ravel()[::-1]:
                np_ans = np_op(np_ans, axis=ra, keepdims=keep_dims)

        tf_ans = tf_op(sp_t, reduction_axes, keep_dims)
        self.assertAllEqual(np_ans.shape, tf_ans.get_shape().as_list())
Code example #34
File: sparse_ops_test.py Project: eyx092/tensorflow
 def testSparseTensorToDenseString(self):
   sp = sparse_tensor.SparseTensor(
       indices=[[0, 0], [1, 2]], values=['a', 'b'], dense_shape=[2, 3])
   dense = sparse_ops.sparse_tensor_to_dense(sp)
   expected_dense = [[b'a', b'', b''], [b'', b'', b'b']]
   result_dense = self.evaluate(dense)
   self.assertAllEqual(expected_dense, result_dense)
Code example #35
File: hashing.py Project: zhufan35/tensorflow
 def _process_input_list(self, inputs):
   # TODO(momernick): support ragged_cross_hashed with corrected fingerprint
   # and siphash.
   if any(isinstance(inp, ragged_tensor.RaggedTensor) for inp in inputs):
     raise ValueError('Hashing with ragged input is not supported yet.')
   sparse_inputs = [
       inp for inp in inputs if isinstance(inp, sparse_tensor.SparseTensor)
   ]
   dense_inputs = [
       inp for inp in inputs if not isinstance(inp, sparse_tensor.SparseTensor)
   ]
   all_dense = True if not sparse_inputs else False
   indices = [sp_inp.indices for sp_inp in sparse_inputs]
   values = [sp_inp.values for sp_inp in sparse_inputs]
   shapes = [sp_inp.dense_shape for sp_inp in sparse_inputs]
   indices_out, values_out, shapes_out = gen_sparse_ops.SparseCrossHashed(
       indices=indices,
       values=values,
       shapes=shapes,
       dense_inputs=dense_inputs,
       num_buckets=self.num_bins,
       strong_hash=self.strong_hash,
       salt=self.salt)
   sparse_out = sparse_tensor.SparseTensor(indices_out, values_out, shapes_out)
   if all_dense:
     return sparse_ops.sparse_tensor_to_dense(sparse_out)
   return sparse_out
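
When every input is dense, the crossed result above is densified before being returned. A hedged sketch of that cross-then-densify flow using the public tf.sparse.cross_hashed (assuming TF 2.2 or later, and without the strong_hash/salt options used in the snippet):

import tensorflow as tf

a = tf.constant([['x'], ['y']])
b = tf.constant([['1'], ['2']])

crossed = tf.sparse.cross_hashed([a, b], num_buckets=8)   # SparseTensor of bucket ids
dense = tf.sparse.to_dense(crossed)                       # all-dense inputs -> dense output
print(dense.numpy().shape)  # (2, 1)
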
Code example #36
 def testComplex(self):
     for dtype in [dtypes.complex64, dtypes.complex128]:
         tf_val = math_ops.cast(
             constant_op.constant([1.0 + 1.0j, 2.0 - 2.0j]),
             dtypes.complex128)
         tf_ans = sparse_ops.sparse_tensor_to_dense(
             sparse_ops.from_dense(tf_val))
         self.assertAllClose(tf_val, tf_ans)
Code example #37
    def call(self, inputs, count_weights=None):
        if isinstance(inputs, (list, np.ndarray)):
            inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
        if inputs.shape.rank == 1:
            inputs = array_ops.expand_dims(inputs, 1)

        if count_weights is not None and self.output_mode != COUNT:
            raise ValueError(
                "count_weights is not used in `output_mode='tf-idf'`, "
                "or `output_mode='binary'`. Please pass a single input.")
        self._called = True
        if self.max_tokens is None:
            raise RuntimeError(
                "If you construct a `CategoryEncoding` layer with "
                "`max_tokens=None`, you need to call `adapt()` "
                "on it before using it")
        else:
            out_depth = self.max_tokens

        if self.output_mode == TFIDF:
            # If the input is a sparse tensor, we densify it with the default value of
            # -1. Because -1 is ignored by one_hot, this effectively drops the non-set
            # positions from the output encoding.
            if self.sparse:
                raise ValueError("`sparse=True` with `output_mode=tfidf` "
                                 "is not supported.")
            if isinstance(inputs, sparse_tensor.SparseTensor):
                inputs = sparse_ops.sparse_tensor_to_dense(inputs,
                                                           default_value=-1)
            one_hot_data = array_ops.one_hot(inputs, depth=out_depth)
            counts = math_ops.reduce_sum(one_hot_data, axis=1)
            tf_idf_data = math_ops.multiply(counts, self.tf_idf_weights)
            tf_idf_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
            return tf_idf_data

        binary_output = (self.output_mode == BINARY)
        if isinstance(inputs, sparse_tensor.SparseTensor):
            max_value = math_ops.reduce_max(inputs.values)
            min_value = math_ops.reduce_min(inputs.values)
        else:
            max_value = math_ops.reduce_max(inputs)
            min_value = math_ops.reduce_min(inputs)
        condition = math_ops.logical_and(
            math_ops.greater(math_ops.cast(out_depth, max_value.dtype),
                             max_value),
            math_ops.greater_equal(min_value,
                                   math_ops.cast(0, min_value.dtype)))
        control_flow_ops.Assert(condition, [
            "Input values must be in the range 0 <= values < max_tokens"
            " with max_tokens={}".format(out_depth)
        ])
        if self.sparse:
            return sparse_bincount(inputs, out_depth, binary_output,
                                   count_weights)
        else:
            return dense_bincount(inputs, out_depth, binary_output,
                                  count_weights)
Code example #38
File: layers.py Project: nininininini/tensorx
    def __init__(self, layer):
        super().__init__(layer, layer.n_units, layer.shape, layer.dtype,
                         layer.name + "_dense")

        with name_scope(self.name):
            if layer.is_sparse():
                self.tensor = sparse_ops.sparse_tensor_to_dense(layer.tensor)
            else:
                self._forward(layer)
Code example #39
  def testDenseSequencesToSparse(self):
    labels = [[1, 3, 3, 3, 0],
              [1, 4, 4, 4, 0],
              [4, 2, 2, 9, 4]]
    length = [4, 5, 5]
    sparse = ctc_ops.dense_labels_to_sparse(labels, length)
    new_dense = sparse_ops.sparse_tensor_to_dense(sparse)

    self.assertAllEqual(labels, new_dense)

    padded_labels = [[1, 3, 3, 3, 0, 0, 0, 0],
                     [1, 4, 4, 4, 0, 0, 0, 0],
                     [4, 2, 2, 9, 4, 0, 0, 0]]
    length = [4, 5, 5]
    sparse = ctc_ops.dense_labels_to_sparse(padded_labels, length)
    padded_dense = sparse_ops.sparse_tensor_to_dense(sparse)

    self.assertAllEqual(padded_dense, new_dense)
Code example #40
  def testPaddingOnlySparse(self):
    ind1 = np.array([[0], [2]])
    val1 = np.array([3, 4])
    shape1 = np.array([4])

    ind2 = np.array([[1], [2]])
    val2 = np.array([9, 12])
    shape2 = np.array([5])

    with ops.Graph().as_default() as g, self.test_session(graph=g):
      sp_tensor1 = sparse_tensor.SparseTensor(
          indices=array_ops.constant(ind1, dtypes.int64),
          values=array_ops.constant(val1, dtypes.int64),
          dense_shape=array_ops.constant(shape1, dtypes.int64))
      sp_tensor2 = sparse_tensor.SparseTensor(
          indices=array_ops.constant(ind2, dtypes.int64),
          values=array_ops.constant(val2, dtypes.int64),
          dense_shape=array_ops.constant(shape2, dtypes.int64))

      sp_tensor1_expected = sparse_tensor.SparseTensor(
          indices=sp_tensor1.indices,
          values=sp_tensor1.values,
          dense_shape=[8])
      sp_tensor2_expected = sparse_tensor.SparseTensor(
          indices=sp_tensor2.indices,
          values=sp_tensor2.values,
          dense_shape=[8])

      sequences = {
          "key_1": sp_tensor1,
          "key_2": sp_tensor2,
      }
      _, padded_seq = sqss._padding(sequences, 4)

      expected_padded_seq = {
          "key_1": sp_tensor1_expected,
          "key_2": sp_tensor2_expected,
      }

      for key, val in expected_padded_seq.items():
        self.assertAllEqual(
            sparse_ops.sparse_tensor_to_dense(val).eval(),
            sparse_ops.sparse_tensor_to_dense(padded_seq[key]).eval())
Code example #41
 def testDistributeSparse(self):
     dispatcher, workers = self.start_cluster(1)  # to avoid gcing workers, pylint: disable=unused-variable
     element = sparse_tensor.SparseTensor(indices=[[0]],
                                          values=constant_op.constant(
                                              [0], dtype=dtypes.int32),
                                          dense_shape=[1])
     ds = dataset_ops.Dataset.from_tensors(element)
     ds = _make_distributed_dataset(ds, dispatcher)
     results = [sparse_ops.sparse_tensor_to_dense(elem) for elem in ds]
     self.assertAllEqual(results, [[0]])
Code example #42
 def tensors_to_item(self, keys_to_tensors):
   tensor = keys_to_tensors[self._tensor_key]
   shape = self._shape
   if self._shape_keys:
     shape_dims = []
     for k in self._shape_keys:
       shape_dim = keys_to_tensors[k]
       if isinstance(shape_dim, ops.SparseTensor):
         shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
       shape_dims.append(shape_dim)
     shape = array_ops.squeeze(array_ops.pack(shape_dims))
   if isinstance(tensor, ops.SparseTensor):
     if shape is not None:
       tensor = sparse_ops.sparse_reshape(tensor, shape)
     tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
   else:
     if shape is not None:
       tensor = array_ops.reshape(tensor, shape)
   return tensor
Code example #43
 def call(self, inputs):
   if isinstance(inputs, ragged_tensor.RaggedTensor):
     return inputs.to_tensor(default_value=self._default_value)
   elif isinstance(inputs, sparse_tensor.SparseTensor):
     return sparse_ops.sparse_tensor_to_dense(
         inputs, default_value=self._default_value)
   elif isinstance(inputs, ops.Tensor):
     return inputs
   else:
     raise TypeError("Unexpected tensor type %s" % type(inputs).__name__)
Code example #44
  def testConsumers(self):
    sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])
    w = ops.convert_to_tensor(np.ones([4, 1], np.float32))
    out = sparse_ops.sparse_tensor_dense_matmul(sp, w)
    self.assertEqual(len(sp.consumers()), 1)
    self.assertEqual(sp.consumers()[0], out.op)

    dense = sparse_ops.sparse_tensor_to_dense(sp)
    self.assertEqual(len(sp.consumers()), 2)
    self.assertTrue(dense.op in sp.consumers())
    self.assertTrue(out.op in sp.consumers())
Code example #45
    def tensors_to_item(self, keys_to_tensors):
        indices = keys_to_tensors[self._indices_key]
        values = keys_to_tensors[self._values_key]
        if self._shape_key:
            shape = keys_to_tensors[self._shape_key]
            if isinstance(shape, ops.SparseTensor):
                shape = sparse_ops.sparse_tensor_to_dense(shape)
        elif self._shape:
            shape = self._shape
        else:
            shape = indices.shape
        indices_shape = array_ops.shape(indices.indices)
        rank = indices_shape[1]
        ids = math_ops.to_int64(indices.values)
        indices_columns_to_preserve = array_ops.slice(indices.indices, [0, 0], array_ops.pack([-1, rank - 1]))
        new_indices = array_ops.concat(1, [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])])

        tensor = ops.SparseTensor(new_indices, values.values, shape)
        if self._densify:
            tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
        return tensor
Code example #46
 def test_hashed__has_no_collision(self):
   """Tests that fingerprint concatenation has no collisions."""
   # Although the last 10 bits of 359 and 1024+359 are identical.
   # As a result, all the crosses shouldn't collide.
   t1 = constant_op.constant([[359], [359 + 1024]])
   t2 = constant_op.constant([list(range(10)), list(range(10))])
   cross = sparse_ops.sparse_cross_hashed(
       [t2, t1], num_buckets=1024, hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
   cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
   with session.Session():
     values = cross_dense.eval()
     self.assertTrue(numpy.not_equal(values[0], values[1]).all())
Code example #47
 def test_hashed_output_v1_has_collision(self):
   """Tests the old version of the fingerprint concatenation has collisions.
   """
   # The last 10 bits of 359 and 1024+359 are identical.
   # As a result, all the crosses collide.
   t1 = constant_op.constant([[359], [359 + 1024]])
   t2 = constant_op.constant([list(range(10)), list(range(10))])
   cross = sparse_feature_cross_op.sparse_feature_cross(
       [t2, t1], hashed_output=True, num_buckets=1024)
   cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
   with session.Session():
     values = cross_dense.eval()
     self.assertTrue(numpy.equal(values[0], values[1]).all())
Code example #48
 def testTranspose(self):
   with self.test_session(use_gpu=False):
     np.random.seed(1618)
     shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
     for shape in shapes:
       for dtype in [np.int32, np.int64, np.float32, np.float64]:
         dn_input = np.random.randn(*shape).astype(dtype)
         rank = array_ops.rank(dn_input).eval()
         perm = np.random.choice(rank, rank, False)
         sp_input, unused_a_nnz = _sparsify(dn_input)
         sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
         dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans).eval()
         expected_trans = array_ops.transpose(dn_input, perm=perm).eval()
         self.assertAllEqual(dn_trans, expected_trans)
Code example #49
 def test_hashed_output_v2_has_no_collision(self):
   """Tests the new version of the fingerprint concatenation has no collisions.
   """
   # Although the last 10 bits of 359 and 1024+359 are identical.
   # As a result, all the crosses shouldn't collide.
   t1 = constant_op.constant([[359], [359 + 1024]])
   t2 = constant_op.constant([list(range(10)), list(range(10))])
   cross = sparse_feature_cross_op.sparse_feature_cross(
       [t2, t1],
       hashed_output=True,
       num_buckets=1024,
       hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
   cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
   with session.Session():
     values = cross_dense.eval()
     self.assertTrue(numpy.not_equal(values[0], values[1]).all())
Code example #50
  def testBasic(self):
    with self.test_session() as sess:
      content = [
          "1 1:3.4 2:0.5 4:0.231", "1 2:2.5 3:inf 5:0.503",
          "2 3:2.5 2:nan 1:0.105"
      ]
      sparse_features, labels = libsvm_ops.decode_libsvm(
          content, num_features=6)
      features = sparse_ops.sparse_tensor_to_dense(
          sparse_features, validate_indices=False)

      self.assertAllEqual(labels.get_shape().as_list(), [3])

      features, labels = sess.run([features, labels])
      self.assertAllEqual(labels, [1, 1, 2])
      self.assertAllClose(
          features, [[0, 3.4, 0.5, 0, 0.231, 0], [0, 0, 2.5, np.inf, 0, 0.503],
                     [0, 0.105, np.nan, 2.5, 0, 0]])
Code example #51
0
File: sparse_ops_test.py Project: Wajih-O/tensorflow
  def testTranspose(self):
    if np.__version__ == "1.13.0":
      self.skipTest("numpy 1.13.0 bug")

    with test_util.force_cpu():
      np.random.seed(1618)
      shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
      for shape in shapes:
        for dtype in [np.int32, np.int64, np.float32, np.float64]:
          dn_input = np.random.randn(*shape).astype(dtype)
          rank = self.evaluate(array_ops.rank(dn_input))
          perm = np.random.choice(rank, rank, False)
          sp_input, unused_a_nnz = _sparsify(dn_input)
          sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
          dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans)
          expected_trans = array_ops.transpose(dn_input, perm=perm)
          self.assertAllEqual(expected_trans.shape, sp_trans.get_shape())
          self.assertAllEqual(dn_trans, expected_trans)
Code example #52
0
  def testNDimension(self):
    with self.cached_session() as sess:
      content = [["1 1:3.4 2:0.5 4:0.231", "1 1:3.4 2:0.5 4:0.231"],
                 ["1 2:2.5 3:inf 5:0.503", "1 2:2.5 3:inf 5:0.503"],
                 ["2 3:2.5 2:nan 1:0.105", "2 3:2.5 2:nan 1:0.105"]]
      sparse_features, labels = libsvm_ops.decode_libsvm(
          content, num_features=6, label_dtype=dtypes.float64)
      features = sparse_ops.sparse_tensor_to_dense(
          sparse_features, validate_indices=False)

      self.assertAllEqual(labels.get_shape().as_list(), [3, 2])

      features, labels = sess.run([features, labels])
      self.assertAllEqual(labels, [[1, 1], [1, 1], [2, 2]])
      self.assertAllClose(
          features, [[[0, 3.4, 0.5, 0, 0.231, 0], [0, 3.4, 0.5, 0, 0.231, 0]], [
              [0, 0, 2.5, np.inf, 0, 0.503], [0, 0, 2.5, np.inf, 0, 0.503]
          ], [[0, 0.105, np.nan, 2.5, 0, 0], [0, 0.105, np.nan, 2.5, 0, 0]]])
Code example #53
0
 def _get_sequence_dense_tensor(
     self, inputs, weight_collections=None, trainable=None):
   # Do nothing with weight_collections and trainable since no variables are
   # created in this function.
   del weight_collections
   del trainable
   sp_tensor = inputs.get(self)
   dense_tensor = sparse_ops.sparse_tensor_to_dense(
       sp_tensor, default_value=self.default_value)
   # Reshape into [batch_size, T, variable_shape].
   dense_shape = array_ops.concat(
       [array_ops.shape(dense_tensor)[:1], [-1], self._variable_shape],
       axis=0)
   dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)
   sequence_length = fc._sequence_length_from_sparse_tensor(
       sp_tensor, num_elements=self._variable_shape.num_elements())
   return fc._SequenceDenseColumn.TensorSequenceLengthPair(
       dense_tensor=dense_tensor, sequence_length=sequence_length)
Code example #54
0
File: sparse_ops_test.py Project: 0-T-0/tensorflow
  def _compare(self, sp_t, reduction_axes, keep_dims):
    densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()

    np_ans = densified
    if reduction_axes is None:
      np_ans = np.sum(np_ans, keepdims=keep_dims)
    else:
      if isinstance(reduction_axes, list):
        reduction_axes = sorted(reduction_axes)  # loop below depends on sorted
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)

    with self.test_session():
      tf_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes, keep_dims)
      out = tf_ans.eval()

    self.assertAllClose(np_ans, out)
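A small concrete illustration (hypothetical values, assuming `import tensorflow as tf`) of the op the comparison above exercises:

# Hypothetical 2x2 sparse tensor whose dense form is [[1, 2], [0, 3]].
sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 1]],
                     values=[1.0, 2.0, 3.0], dense_shape=[2, 2])
total = tf.sparse_reduce_sum(sp)        # evaluates to 6.0
row_sums = tf.sparse_reduce_sum(sp, 1)  # evaluates to [3.0, 3.0]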
Code example #55
0
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
  np.random.seed(1618)

  with session.Session(graph=ops.Graph()) as sess:
    sp_vals = np.random.rand(n, m).astype(np.float32)
    sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32)
    vals = np.random.rand(n, m).astype(np.float32)

    s2d = math_ops.add(
        sparse_ops.sparse_tensor_to_dense(sp_t), constant_op.constant(vals))
    sa = sparse_ops.sparse_add(sp_t, constant_op.constant(vals))

    timeit.timeit(lambda: sess.run(s2d), number=3)
    timeit.timeit(lambda: sess.run(sa), number=3)

    s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters)
    sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters)

  # Per-iteration latency, converted from seconds to milliseconds.
  return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters
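A hypothetical driver for the micro-benchmark above (the shapes and sparsity levels are illustrative only, not from the original):

for sparsity in (0.1, 0.5, 0.9):  # illustrative sparsity thresholds
  s2d_ms, sa_ms = _s2d_add_vs_sparse_add(sparsity, n=1024, m=1024, num_iters=50)
  print("sparsity=%.1f  to_dense+add: %.3f ms/iter  sparse_add: %.3f ms/iter"
        % (sparsity, s2d_ms, sa_ms))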
Code example #56
0
File: model_util.py Project: SIMEXP/deepmotion
def to_dense(tensor):
    """Converts a sparse tensor into a dense tensor and returns it.
    Arguments:
      tensor: A tensor instance (potentially sparse).
    Returns:
      A dense tensor.
    Examples:
    ```python
      >>> from keras import backend as K
      >>> b = K.placeholder((2, 2), sparse=True)
      >>> print(K.is_sparse(b))
      True
      >>> c = K.to_dense(b)
      >>> print(K.is_sparse(c))
      False
    ```
    """
    if is_sparse(tensor):
        return sparse_ops.sparse_tensor_to_dense(tensor)
    else:
        return tensor
Code example #57
0
  def testCwiseDivAndMul(self):
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]

    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64, np.int32, np.int64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
          sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
          dense_t = tf.constant(dense_vals_np)

          self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
          # Check commutative.
          self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
          self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)

          if dtype in [np.int32, np.int64]:
            res = sp_t / dense_t  # should invoke "__truediv__"
            self.assertEqual(res.values.eval().dtype, np.float64)
Code example #58
0
def _sampled_scattered_embedding_lookup(
    params, values, dimension=None, sampled_candidates=None, hash_key=None,
    name=None):
  """Looks up embeddings using parameter hashing for each value in `values`.

  This method looks up selected embedding dimensions if `sampled_candidates` is
  given, otherwise looks up all dimensions.

  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).
  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf

  Feature hashing has the pleasant effect of allowing us to compute an embedding
  without needing a pre-determined vocabulary, relieving some amount of process
  complexity. It also allows for us to maintain embeddings for possibly
  trillions of features with a fixed amount of memory.

  Note that this is superior to out-of-vocabulary shared "hash buckets" in that
  the embedding is extremely likely to be unique for each token as opposed to
  being shared across probably-colliding tokens. The price is that we must
  compute a hash once for each scalar in the token's embedding as opposed to
  once per token.

  If `params` is a list, it represents a partition of the embedding parameters.
  Each tensor in the list should have the same length, except for the first ones
  which may have an additional element. For instance 10 parameters can be
  partitioned in 4 tensors with length `[3, 3, 2, 2]`.

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
    dimension: Embedding dimension. The user must specify either `dimension` or
      `sampled_candidates`.
    sampled_candidates: An optional `Tensor` of slice indices to keep along the
      final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
      ignored. If `None`, looks up all candidates.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).
    name: An optional name for this op.

  Returns:
    A `Tensor` with shape `[d0, ..., dn, dimension]`.
    If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`.

  Raises:
    ValueError: if dimension is not positive or the partition size is invalid.
  """
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)
  if not isinstance(params, list):
    params = [params]

  with ops.name_scope(name, "scattered_embedding_lookup",
                      params + [dimension, values]):
    # Flatten the values
    values_shape = array_ops.shape(values)
    values = array_ops.reshape(values, [-1, 1])

    if sampled_candidates is None:
      if dimension is None:
        raise ValueError(
            "You must specify either dimension or sampled_candidates.")
      if dimension <= 0:
        raise ValueError("Dimension must be >0. Given is %d" % dimension)
      sampled_candidates = array_ops.tile(array_ops.expand_dims(
          math_ops.range(0, dimension), 0), array_ops.shape(values))
    else:
      dimension = array_ops.shape(sampled_candidates)[
          math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
      sampled_candidates_shape = array_ops.shape(sampled_candidates)
      dimension_tensor = array_ops.reshape(dimension, shape=[1,])
      expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
      with ops.control_dependencies([control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(sampled_candidates_shape,
                                             expected_shape)),
          ["The shape of sampled_candidates: ", sampled_candidates_shape,
           " does not match the shape of values: ", values_shape])]):
        # Flatten sampled_candidates, same way as values are flattened.
        sampled_candidates = array_ops.reshape(sampled_candidates,
                                               [-1, dimension])

    num_partitions = len(params)
    partition_sizes = []
    for p in range(num_partitions):
      shape = params[p].get_shape()
      shape.assert_has_rank(1)
      shape.assert_is_fully_defined()
      partition_sizes.append(shape[0].value)
    num_params = sum(partition_sizes)  # Total number of parameters.

    # Assert the size of each partition.
    for p in range(num_partitions):
      expected_size = (num_params - p - 1) // num_partitions + 1
      if partition_sizes[p] != expected_size:
        raise ValueError("Tensor %d in params has size %d, expected %d." %
                         (p, partition_sizes[p], expected_size))

    # With two values v1 and v2 and 3 dimensions, we will cross
    # [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
    tensors_to_cross = [sampled_candidates, values]
    ids = sparse_feature_cross_op.sparse_feature_cross(
        tensors_to_cross, hashed_output=True, num_buckets=num_params,
        hash_key=hash_key)
    ids = sparse_ops.sparse_tensor_to_dense(ids)

    # No need to validate the indices since we have checked the params
    # dimensions and we know the largest id.
    result = embedding_ops.embedding_lookup(
        params, ids, partition_strategy="div")

    return array_ops.reshape(result,
                             array_ops.concat([values_shape, [dimension]], 0))
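The partition-size check above encodes the rule stated in the docstring (partitions differ in length by at most one, with the earlier ones holding the extra elements). A plain-Python sketch of that rule, using a hypothetical helper name:

def _expected_partition_sizes(num_params, num_partitions):
  # Same formula the assertion above applies to each partition p.
  return [(num_params - p - 1) // num_partitions + 1
          for p in range(num_partitions)]

assert _expected_partition_sizes(10, 4) == [3, 3, 2, 2]  # the docstring's example
assert _expected_partition_sizes(7, 3) == [3, 2, 2]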
Code example #59
0
File: embedding_ops.py Project: AriaAsuka/tensorflow
def hashed_embedding_lookup(params, values, dimension, name=None):
  """Looks up embeddings using parameter hashing for each value in `values`.

  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).
  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf

  Feature hashing has the pleasant effect of allowing us to compute an embedding
  without needing a pre-determined vocabulary, relieving some amount of process
  complexity. It also allows for us to maintain embeddings for possibly
  trillions of features with a fixed amount of memory.

  Note that this is superior to out-of-vocabulary shared "hash buckets" in that
  the embedding is extremely likely to be unique for each token as opposed to
  being shared across probably-colliding tokens. The price is that we must
  compute a hash once for each scalar in the token's embedding as opposed to
  once per token.

  If `params` is a list, it represents a partition of the embedding parameters.
  Each tensor in the list should have the same length, except for the first ones
  which may have an additional element. For instance 10 parameters can be
  partitioned in 4 tensors with length `[3, 3, 2, 2]`.

  Args:
    params: A `Tensor` or `list` of `Tensors`.
      Each tensor must be of rank 1 with fully-defined shape.
    values: `Tensor` of values to be embedded.
    dimension: Embedding dimension
    name: An optional name for this op.

  Returns:
    A tensor with shape `[d0, ..., dn, dimension]`, where
    `shape(values) = [d0, ..., dn]`.

  Raises:
    ValueError: if dimension is not positive or the partition size is invalid.
  """
  if not isinstance(params, list):
    params = [params]

  with ops.name_scope(name, "hashed_embedding_lookup",
                      params + [dimension, values]):
    if dimension <= 0:
      raise ValueError("Dimension should be >0 not %d" % dimension)

    num_partitions = len(params)
    partition_sizes = []
    for p in range(num_partitions):
      shape = params[p].get_shape()
      shape.assert_has_rank(1)
      shape.assert_is_fully_defined()
      partition_sizes.append(shape[0].value)
    num_params = sum(partition_sizes)  # Total number of parameters.

    # Assert the size of each partition.
    for p in range(num_partitions):
      expected_size = (num_params - p - 1) // num_partitions + 1
      if partition_sizes[p] != expected_size:
        raise ValueError("Tensor %d in params has size %d, expected %d." %
                         (p, partition_sizes[p], expected_size))

    # Flatten the values
    values_shape = array_ops.shape(values)
    values = array_ops.reshape(values, [-1, 1])

    # With two values v1 and v2 and 3 dimensions, we will cross
    # [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
    tensors_to_cross = [array_ops.tile(array_ops.expand_dims(
        math_ops.range(0, dimension), 0), array_ops.shape(values)), values]
    ids = sparse_feature_cross_op.sparse_feature_cross(
        tensors_to_cross, hashed_output=True, num_buckets=num_params)
    ids = sparse_ops.sparse_tensor_to_dense(ids)

    # No need to validate the indices since we have checked the params
    # dimensions and we know the largest id.
    result = embedding_ops.embedding_lookup(
        params, ids, partition_strategy="div", validate_indices=False)

    return array_ops.reshape(result, array_ops.concat(
        0, [values_shape, [dimension]]))
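A minimal, hypothetical usage sketch of `hashed_embedding_lookup`, assuming a TF 1.x graph-mode environment where the function above is importable; the three rank-1 partitions of sizes [4, 3, 3] satisfy the size check for 10 parameters split across 3 partitions:

import numpy as np
import tensorflow as tf

# 10 weights partitioned as [4, 3, 3]; the values are arbitrary, for illustration only.
params = [tf.constant(np.random.randn(size).astype(np.float32))
          for size in (4, 3, 3)]
values = tf.constant([[10, 20], [30, 40]], dtype=tf.int64)
embedded = hashed_embedding_lookup(params, values, dimension=8)
# After evaluation, `embedded` has shape [2, 2, 8]: one 8-dimensional hashed
# embedding per value in `values`.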