Code Example #1
File: combination.py Project: weiczhu/neuralmonkey
    def initial_loop_state(self) -> AttentionLoopState:

        # Attender length: total of all encoder time dimensions, plus
        # one extra slot when a sentinel vector is used.
        length = sum(tf.shape(s)[1] for s in self._encoders_tensors)
        if self._use_sentinels:
            length += 1

        return empty_attention_loop_state(self.batch_size, length,
                                          self.context_vector_size)
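In these examples, empty_attention_loop_state takes a batch size, an attender length, and a context-vector size. A minimal sketch of what such a function plausibly returns, assuming the loop state holds per-step context vectors and attention distributions (the field names and shapes are my assumption, not the project's confirmed definition):

import tensorflow as tf
from typing import NamedTuple

# Assumed layout: one growing tensor of context vectors and one of
# attention distributions, each with an empty leading (step) axis.
class AttentionLoopState(NamedTuple):
    contexts: tf.Tensor   # steps x batch x context_dim
    weights: tf.Tensor    # steps x batch x attender_length

def empty_attention_loop_state(batch_size, length, dim) -> AttentionLoopState:
    return AttentionLoopState(
        contexts=tf.zeros([0, batch_size, dim]),
        weights=tf.zeros([0, batch_size, length]))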
Code Example #2
File: combination.py Project: ufal/neuralmonkey
    def initial_loop_state(self) -> HierarchicalLoopState:
        length = len(self.attentions)
        if self._use_sentinels:
            length += 1

        return HierarchicalLoopState(
            child_loop_states=[a.initial_loop_state()
                               for a in self.attentions],
            loop_state=empty_attention_loop_state(
                self.batch_size, length, self.context_vector_size))
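The hierarchical variant nests one loop state per child attention next to the combined attention's own state. A plausible container matching the constructor calls above (the definition itself is an assumption):

from typing import Any, List, NamedTuple

class HierarchicalLoopState(NamedTuple):
    child_loop_states: List[Any]  # one loop state per child attention
    loop_state: Any               # the combined attention's own state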
Code Example #3
    def initial_loop_state(self) -> AttentionLoopState:

        # Here we need to make sure that the hidden_features and attention_mask
        # are pre-computed. If this is used in combination with a decoder which
        # has train and runtime while loops, these tensors need to be created
        # outside of any of those loops in order to be available to both.

        # Note that we are not breaking lazy loading here because this method
        # is called from a lazy tensor.

        debug("Pre-computing attention tensors", "bless")
        debug("Hidden features: {}".format(self.hidden_features), "bless")
        debug("Hidden mask: {}".format(self.attention_mask), "bless")

        return empty_attention_loop_state(
            self.batch_size,
            tf.shape(self.attention_states)[1],
            self.context_vector_size)
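The comment in this example is the crux: in graph-mode TensorFlow, a tensor first created inside one tf.while_loop body lives in that loop's frame and is invisible to any other loop. A toy sketch of the safe pattern (shapes and names are made up, not NeuralMonkey code): create the tensor once outside and only read it inside both loops.

import tensorflow as tf

# Pre-compute a projection once; two separate while loops (standing in
# for the decoder's train and runtime loops) both read it.
states = tf.zeros([8, 20, 64])                      # batch x time x dim
projection = tf.ones([64, 64])
hidden_features = tf.einsum("btd,de->bte", states, projection)

def body(i, acc):
    # Both loop bodies only read hidden_features; neither creates it,
    # so the tensor lives outside either loop's frame.
    return i + 1, acc + tf.reduce_sum(hidden_features)

_, train_total = tf.while_loop(lambda i, _: i < 3, body, [0, 0.0])
_, runtime_total = tf.while_loop(lambda i, _: i < 5, body, [0, 0.0])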
Code Example #4
File: combination.py Project: ufal/neuralmonkey
    def initial_loop_state(self) -> AttentionLoopState:

        # Similarly to the feed_forward attention, we need to build the encoder
        # projections and masks before the while loop is entered so they are
        # not created as a part of the loop

        # pylint: disable=not-an-iterable
        for val in self.encoder_projections_for_logits:
            debug(val, "bless")
        debug(self.masks_concat, "bless")

        length = sum(tf.shape(s)[1] for s in self._encoders_tensors)
        # pylint: enable=not-an-iterable

        if self._use_sentinels:
            length += 1

        return empty_attention_loop_state(self.batch_size, length,
                                          self.context_vector_size)
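The "+ 1" reserves one position in the attention distribution for the sentinel vector, so the attender length is the sum of every encoder's time dimension plus one. A quick shape check with made-up encoder tensors:

import tensorflow as tf

enc_a = tf.zeros([8, 20, 64])   # batch x time x dim, made-up shapes
enc_b = tf.zeros([8, 15, 64])

length = sum(tf.shape(s)[1] for s in [enc_a, enc_b])  # 20 + 15
length += 1                                           # sentinel slot
# length evaluates to 36: one weight per encoder position plus the sentinel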
Code Example #5
    def initial_loop_state(self) -> AttentionLoopState:
        return empty_attention_loop_state(self.batch_size,
                                          tf.shape(self.attention_states)[1],
                                          self.context_vector_size)
Code Example #6
    def initial_loop_state(self) -> AttentionLoopState:
        return empty_attention_loop_state()
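Unlike the variants above, this call passes no shapes at all, which suggests a version of empty_attention_loop_state that defers sizing, for example to dynamically sized tf.TensorArray objects. A sketch of such a no-argument variant (an assumption, not the project's confirmed code):

import tensorflow as tf
from typing import NamedTuple

class AttentionLoopState(NamedTuple):
    contexts: tf.TensorArray
    weights: tf.TensorArray

def empty_attention_loop_state() -> AttentionLoopState:
    # Dynamically sized arrays grow by one entry per decoding step, so
    # neither the batch size nor the attender length is needed up front.
    return AttentionLoopState(
        contexts=tf.TensorArray(tf.float32, size=0, dynamic_size=True),
        weights=tf.TensorArray(tf.float32, size=0, dynamic_size=True))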
Code Example #7
    def initial_loop_state(self) -> HierarchicalLoopState:
        return HierarchicalLoopState(
            child_loop_states=[a.initial_loop_state()
                               for a in self.attentions],
            loop_state=empty_attention_loop_state())
Code Example #8
File: stateful_context.py Project: ufal/neuralmonkey
    def initial_loop_state(self) -> AttentionLoopState:
        # The stateful context attends over a single vector, so the
        # attender length is fixed to 1.
        return empty_attention_loop_state(
            self.batch_size, 1,
            self.context_vector_size)
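In every variant, the value returned by initial_loop_state seeds the decoder's while loop and is grown by one entry per decoding step. A self-contained toy stand-in for that threading pattern (plain Python, not the NeuralMonkey API):

from typing import List, NamedTuple

class LoopState(NamedTuple):
    contexts: List[float]

def initial_loop_state() -> LoopState:
    return LoopState(contexts=[])

def attention_step(query: float, state: LoopState) -> LoopState:
    # Each step appends one new context vector to the running state.
    return LoopState(contexts=state.contexts + [query * 0.5])

state = initial_loop_state()
for step in range(3):
    state = attention_step(float(step), state)
print(state.contexts)  # [0.0, 0.5, 1.0]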