def _get_sequence_dense_tensor(
      self, inputs, weight_collections=None, trainable=None):
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
            self, inputs, weight_collections, trainable)
      return tpu.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
          self, inputs, weight_collections, trainable)

    tensor = inputs.get(self.get_feature_key_name())
    tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())

    # inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
    # We need to undo this to match the standard CPU sequence embedding.
    tensor_lengths = array_ops.squeeze(tensor_lengths, -1)

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(self.get_embedding_var_name(),
                                    'embedding_weights')

    return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=tensor, sequence_length=tensor_lengths)
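
All of these methods follow the same three-way dispatch: serve via outside_compilation in a TPU inference context, fall back to the plain CPU feature column otherwise off-TPU, and on TPU fetch the precomputed activations. A minimal standalone sketch of that pattern follows; the two predicates are hypothetical stand-ins for TensorFlow's internal context checks, not the real implementations.

import tensorflow.compat.v1 as tf

def _under_tpu_inference_context():
    # Hypothetical stub; the real check lives in tensorflow.python.tpu.tpu.
    return False

def _is_running_on_cpu():
    # Hypothetical stub; the real check inspects the TPU context.
    return True

def dispatch(cpu_impl, tpu_lookup):
    """Three-way dispatch used by every method in these examples."""
    if _under_tpu_inference_context():
        # Serving on TPU: run the CPU implementation on the host.
        return tf.tpu.outside_compilation(cpu_impl)
    if _is_running_on_cpu():
        # Plain CPU execution: defer to the ordinary feature column.
        return cpu_impl()
    # TPU training: activations were already computed by the TPU embedding
    # engine; just fetch them from the input cache.
    return tpu_lookup()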
Example #2
    def _get_dense_tensor_internal(self, transformation_cache, state_manager):
        if tpu.under_tpu_inference_context():

            def host_computation():
                return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
                    self, transformation_cache, state_manager)

            return tpu.outside_compilation(host_computation)

        if _is_running_on_cpu():
            return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
                self, transformation_cache, state_manager)

        # TPU mode
        # Get the embeddings from the FeatureTransformationCache.
        tensor = transformation_cache.get(self.get_feature_key_name(),
                                          state_manager)

        # Add to collection for _create_tpu_embedding_variables_and_ops
        # Note that in Feature Column V2, shared embeddings have no scope.
        _record_variable_scope_and_name(
            self.get_embedding_var_name(),
            self.shared_embedding_column_creator._name,
            is_shared_embedding=True)
        return tensor
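
For context, a hedged sketch of how such a shared column is typically constructed, assuming the TF 1.x tf.compat.v1.tpu.experimental.shared_embedding_columns API; the feature names and bucket sizes are illustrative placeholders.

import tensorflow.compat.v1 as tf

# Two categorical features that should share one embedding table.
watched = tf.feature_column.categorical_column_with_identity(
    'watched_video_id', num_buckets=100000)
impression = tf.feature_column.categorical_column_with_identity(
    'impression_video_id', num_buckets=100000)

# On TPU the lookup is performed by the TPU embedding engine, and
# _get_dense_tensor_internal above only fetches the cached activations.
shared_columns = tf.tpu.experimental.shared_embedding_columns(
    [watched, impression], dimension=32)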
Example #3
    def _get_dense_tensor(self,
                          inputs,
                          weight_collections=None,
                          trainable=None):
        if tpu.under_tpu_inference_context():

            def host_computation():
                return fc_lib.EmbeddingColumn._get_dense_tensor(
                    self, inputs, weight_collections, trainable)

            return tpu.outside_compilation(host_computation)

        if _is_running_on_cpu():
            return fc_lib.EmbeddingColumn._get_dense_tensor(
                self, inputs, weight_collections, trainable)

        # TPU mode
        # Get the embeddings from the LazyBuilder.
        tensor = inputs.get(self.get_feature_key_name())

        # Add to collection for _create_tpu_embedding_variables_and_ops
        _record_variable_scope_and_name(self.get_embedding_var_name(),
                                        'embedding_weights')

        return tensor
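
The non-shared counterpart is built the same way; a hedged usage sketch, assuming the tf.compat.v1.tpu.experimental.embedding_column API with a placeholder feature name.

import tensorflow.compat.v1 as tf

# A single categorical feature (name and bucket size are placeholders).
video_id = tf.feature_column.categorical_column_with_identity(
    'video_id', num_buckets=100000)

# On CPU this behaves like a regular embedding column; on TPU,
# _get_dense_tensor above returns the activations computed by the
# TPU embedding engine instead of doing an in-graph lookup.
video_embedding = tf.tpu.experimental.embedding_column(
    video_id, dimension=32)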
Example #4
    def create_state(self, state_manager):
        if _is_running_on_cpu():
            return fc_lib.EmbeddingColumn.create_state(self, state_manager)

        # create_state is called for the EmbeddingColumn to create its
        # embedding variables under feature column V2. On TPU the variables
        # are instead created later by _create_tpu_embedding_variables_and_ops,
        # so only the variable scope is recorded here.
        _record_variable_scope_and_name(self.get_embedding_var_name(),
                                        'embedding_weights')
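
The _record_variable_scope_and_name helper is internal to TensorFlow; a hedged sketch of the bookkeeping it appears to perform (the registry name and tuple layout below are assumptions, not the real implementation):

import tensorflow.compat.v1 as tf

# Hypothetical module-level registry mapping an embedding variable name to
# the scope it should be created under; the real helper may differ.
_tpu_embedding_scopes = {}

def record_variable_scope_and_name(var_name, var_scope_name,
                                   is_shared_embedding=False):
    # Capture the enclosing variable scope so that
    # _create_tpu_embedding_variables_and_ops can later create the embedding
    # table in the scope the feature column was built in.
    scope = tf.get_variable_scope()
    _tpu_embedding_scopes[var_name] = (scope.name, var_scope_name,
                                       is_shared_embedding)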