Code example #1
0
    def __call__(self, query, state, softmaxed=True):
        """Score the query based on the keys and values.

        Args:
          query: Tensor of dtype matching `self.values` and shape
            `[batch_size, query_depth]`.
          state: Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]`
            (`alignments_size` is memory's `max_time`).
          softmaxed: Python bool. If True (default), normalize the raw
            score with `self._probability_fn` and return the alignments
            together with the next attention state. If False, skip the
            normalization and return the raw score pooled over the memory
            time axis by `masked_mean_pooling`.

        Returns:
          If `softmaxed` is True, a tuple `(alignments, next_state)`, both
          of dtype matching `self.values` and shape
          `[batch_size, alignments_size]` (`alignments_size` is memory's
          `max_time`). If `softmaxed` is False, a single pooled raw-score
          tensor.
        """
        with variable_scope.variable_scope(None, "bahdanau_attention",
                                           [query]):
            # Optionally project the query into the attention space before
            # scoring it against the pre-computed keys.
            processed_query = self.query_layer(
                query) if self.query_layer else query
            score = _bahdanau_score(processed_query, self._keys,
                                    self._normalize)
        if softmaxed:
            alignments = self._probability_fn(score, state)
            # The alignments double as the recurrent attention state.
            next_state = alignments
            return alignments, next_state
        # NOTE(review): non-softmax path pools the raw scores over time —
        # presumably a sequence-level score; confirm against callers.
        score = masked_mean_pooling(score, self._memory_sequence_length,
                                    int(self._values.shape[1]))
        return score
Code example #2
0
 def __call__(self, query, state=None):
     """Compute attention alignments for `query` inside the reused scope.

     Returns:
       A `(alignments, next_state)` pair; the alignments themselves serve
       as the next attention state.
     """
     with vs.variable_scope(self._attention_scope, reuse=True):
         if self.query_layer:
             projected = self.query_layer(query)
         else:
             projected = query
         score = _bahdanau_score(processed_query=projected,
                                 keys=self._keys,
                                 normalize=self._normalize)
     alignments = self._probability_fn(score, state)
     # The alignments double as the recurrent attention state.
     return alignments, alignments
Code example #3
0
    def __call__(self, query, state):
        """Score `query` against the pre-computed keys.

        Returns:
          A `(alignments, next_state)` pair; the alignments themselves
          serve as the next attention state.
        """
        if self.query_layer:
            projected = self.query_layer(query)
        else:
            projected = query

        with tf.variable_scope("bahdanau_attention", reuse=tf.AUTO_REUSE):
            score = _bahdanau_score(projected, self._keys, self._normalize)

        alignments = self._probability_fn(score, state)
        # The alignments double as the recurrent attention state.
        return alignments, alignments
Code example #4
0
    def __call__(self, query, state):
        """Score the query based on the keys and values.

        Monotonic-attention variant: a learned scalar bias is added to the
        raw score, and the softmaxed alignments are shifted by a second
        learned bias, passed through ReLU, and re-normalized before being
        returned.

        Args:
          query: Tensor of dtype matching `self.values` and shape
            `[batch_size, query_depth]`.
          state: Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]`
            (`alignments_size` is memory's `max_time`).

        Returns:
          A tuple `(alignments, next_state)`, each of dtype matching
          `self.values` and shape `[batch_size, alignments_size]`
          (`alignments_size` is memory's `max_time`). `next_state` is the
          un-shifted softmax output, kept for the next decoding step.
        """
        with tf.variable_scope(None, "bahdanau_monotonic_hccho_attention",
                               [query]):
            processed_query = self.query_layer(
                query) if self.query_layer else query

            # processed_query: (N, num_units); expanded to (N, 1, num_units)
            # inside _bahdanau_score so it can be added to self._keys.
            # self._keys: (N, encoder_dim, num_units)
            score = _bahdanau_score(processed_query, self._keys,
                                    self._normalize)  # keys come from the memory
            score_bias = tf.get_variable("attention_score_bias",
                                         dtype=processed_query.dtype,
                                         initializer=self._score_bias_init)

            # Earlier variant kept for reference (one bias per time step):
            #alignments_bias = tf.get_variable("alignments_bias", shape = state.get_shape()[-1],dtype=processed_query.dtype, initializer=tf.zeros_initializer())  # hccho
            alignments_bias = tf.get_variable(
                "alignments_bias",
                shape=(1),
                dtype=processed_query.dtype,
                initializer=tf.zeros_initializer())  # scalar bias, broadcast over time (hccho)

            score += score_bias
        alignments = self._probability_fn(
            score, state)  # _probability_fn is softmax in BahdanauAttention

        next_state = alignments  # state value reused for the next alignment step
        # hccho: the shifted alignments below are what attention consumes.
        alignments = tf.nn.relu(alignments + alignments_bias)
        alignments = alignments / (tf.reduce_sum(
            alignments, axis=-1, keepdims=True) + 1.0e-12)  # re-normalize; epsilon guards all-zero rows (hccho fix)

        return alignments, next_state
Code example #5
0
 def __call__(self, query, state):
     """Score the query based on the keys and values (monotonic variant).

     Args:
       query: Tensor of dtype matching `self.values` and shape
         `[batch_size, query_depth]`.
       state: Tensor of dtype matching `self.values` and shape
         `[batch_size, alignments_size]`
         (`alignments_size` is memory's `max_time`).

     Returns:
       A `(alignments, next_state)` pair, each of dtype matching
       `self.values` and shape `[batch_size, alignments_size]`
       (`alignments_size` is memory's `max_time`).
     """
     with variable_scope.variable_scope(
             None, "bahdanau_monotonic_attention", [query]):
         if self.query_layer:
             projected = self.query_layer(query)
         else:
             projected = query
         raw_score = _bahdanau_score(projected, self._keys, self._normalize)
         bias = variable_scope.get_variable(
             "attention_score_bias", dtype=projected.dtype,
             initializer=self._score_bias_init)
         raw_score += bias

     alignments = self._probability_fn(raw_score, state)
     # The alignments double as the recurrent attention state.
     return alignments, alignments