  def get_dense_tensor(self, transformation_cache, state_manager):
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.EmbeddingColumn.get_dense_tensor(
            self, transformation_cache, state_manager)
      return tpu.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn.get_dense_tensor(
          self, transformation_cache, state_manager)

    # TPU mode
    # Get the embeddings from the FeatureTransformationCache.
    tensor = transformation_cache.get(self.get_feature_key_name(),
                                      state_manager)

    return tensor
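
A minimal usage sketch, assuming the TF1-style tf.compat.v1.tpu.experimental.embedding_column API that produces columns with this get_dense_tensor dispatch; the feature key and sizes are made up for illustration:

import tensorflow.compat.v1 as tf

# Hypothetical feature: integer user ids in [0, 100000).
categorical = tf.feature_column.categorical_column_with_identity(
    key='user_id', num_buckets=100000)

# On TPU, get_dense_tensor reads the precomputed activation from the
# FeatureTransformationCache; on CPU (or under TPU inference, via
# outside_compilation) it falls back to the plain EmbeddingColumn lookup.
embedding = tf.tpu.experimental.embedding_column(categorical, dimension=64)
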
Example #2
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.EmbeddingColumn._get_dense_tensor(
            self, inputs, weight_collections, trainable)
      return tpu.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn._get_dense_tensor(
          self, inputs, weight_collections, trainable)

    # TPU mode
    # Get the embeddings from the LazyBuilder.
    tensor = inputs.get(self.get_feature_key_name())

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(self.get_embedding_var_name(),
                                    'embedding_weights')

    return tensor
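
The _record_variable_scope_and_name helper is not shown in this listing. Below is a minimal sketch of the bookkeeping its call site suggests, assuming it records the enclosing variable scope in a graph collection so that _create_tpu_embedding_variables_and_ops can later create the real embedding variables there; the collection key and function name are hypothetical:

import tensorflow.compat.v1 as tf

_TPU_FC_TO_SCOPE = '_tpu_feature_column_to_scope'  # assumed collection key

def _record_variable_scope_and_name_sketch(embedding_var_name, var_name):
  # Map the embedding variable name to the scope it was requested under so a
  # later pass can build the TPU embedding variables in the right scope.
  g = tf.get_default_graph()
  collection = g.get_collection_ref(_TPU_FC_TO_SCOPE)
  if not collection:
    collection.append({})
  scope = tf.get_variable_scope().name
  collection[0].setdefault(embedding_var_name, (scope, var_name))
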
Example #3
  def _get_dense_tensor_internal(self, transformation_cache, state_manager):
    """Private method that follows the signature of _get_dense_tensor_internal."""
    _check_invalid_cases(self._embedding_lookup_device)
    # CPU case.
    is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
    is_cpu = is_cpu or _is_running_on_cpu()
    if is_cpu:
      return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
                   self)._get_dense_tensor_internal(transformation_cache,
                                                    state_manager)
    # TPU_EMBEDDING_CORE case: delegate to the regular TPU embedding lookup.
    if self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
      return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
                   self)._get_dense_tensor_internal(transformation_cache,
                                                    state_manager)

    # TENSOR_CORE case: perform the lookup as a dense gather on TensorCore.
    if tpu.under_tpu_inference_context():
      # For inference, use outside compilation to densify and pad the input
      # tensors.
      sparse_tensor = transformation_cache.get(self.categorical_column.name,
                                               state_manager)

      def host_computation():
        return pad_sparse_embedding_lookup_indices(sparse_tensor,
                                                   self._tensor_core_shape[1])

      values, mask = tpu.outside_compilation(host_computation)
    else:
      # For training, the inputs should already have been densified and
      # padded.
      values = transformation_cache.get(self.categorical_column.name,
                                        state_manager)
      mask = transformation_cache.get(
          self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
          state_manager)

    # Do a dense embedding lookup on TensorCore.
    embedding_weights = self.shared_embedding_column_creator.embedding_weights
    return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
                                            self.get_combiner())
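
The TensorCore path relies on helpers (pad_sparse_embedding_lookup_indices, sparse_embedding_aggregate_slice) that are not part of this listing. A minimal sketch of the aggregation step, not the library's implementation, assuming values/mask are the padded [batch, max_ids] id and 0/1 validity tensors produced above:

import tensorflow as tf

def masked_embedding_combine(weights, ids, mask, combiner='mean'):
  # weights: [vocab, dim]; ids, mask: [batch, max_ids].
  emb = tf.gather(weights, ids)                       # [batch, max_ids, dim]
  mask_f = tf.cast(mask, emb.dtype)[..., tf.newaxis]  # [batch, max_ids, 1]
  summed = tf.reduce_sum(emb * mask_f, axis=1)        # [batch, dim]
  if combiner == 'sum':
    return summed
  counts = tf.maximum(tf.reduce_sum(mask_f, axis=1), 1.0)  # [batch, 1]
  if combiner == 'mean':
    return summed / counts
  return summed / tf.sqrt(counts)  # 'sqrtn'
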
Example #4
  def _get_dense_tensor_internal(
      self, transformation_cache, state_manager):
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
            self, transformation_cache, state_manager)
      return tpu.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
          self, transformation_cache, state_manager)

    # TPU mode
    # Get the embeddings from the FeatureTransformationCache.
    tensor = transformation_cache.get(self.get_feature_key_name(),
                                      state_manager)

    # Add to collection for _create_tpu_embedding_variables_and_ops
    # Note that in Feature Column V2, shared embeddings have no scope.
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        self.shared_embedding_column_creator._name,
        is_shared_embedding=True)
    return tensor
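
A minimal usage sketch for the shared-embedding variant, assuming the TF1-style tf.compat.v1.tpu.experimental.shared_embedding_columns API; feature keys and sizes are illustrative:

import tensorflow.compat.v1 as tf

watched = tf.feature_column.categorical_column_with_identity(
    key='watched_video_id', num_buckets=50000)
impressed = tf.feature_column.categorical_column_with_identity(
    key='impression_video_id', num_buckets=50000)

# Both columns share one [50000, 32] table; on TPU each column's
# _get_dense_tensor_internal reads its own activation from the cache while
# recording the shared variable (which has no scope in Feature Column V2).
cols = tf.tpu.experimental.shared_embedding_columns(
    [watched, impressed], dimension=32)
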
Example #5
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
            self, transformation_cache, state_manager)
      return tpu.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
          self, transformation_cache, state_manager)

    tensor = transformation_cache.get(self.get_feature_key_name(),
                                      state_manager)
    tensor_lengths = transformation_cache.get(
        self.get_sequence_length_feature_key_name(),
        state_manager)

    # FeatureTransformationCache expands rank 1 tensors (like sequence length)
    # to rank 2. We need to undo this to match the standard CPU sequence
    # embedding.
    tensor_lengths = array_ops.squeeze(tensor_lengths, -1)

    return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=tensor, sequence_length=tensor_lengths)
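
A small check of the squeeze step, showing how a [batch, 1] length tensor from the FeatureTransformationCache is restored to the [batch] shape the CPU sequence path returns; the values are made up:

import tensorflow as tf

lengths_from_cache = tf.constant([[3], [1], [2]])  # rank 2: [batch, 1]
lengths = tf.squeeze(lengths_from_cache, -1)       # rank 1: [batch]
print(lengths.shape)  # (3,)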