Example #1
 # assumed imports: from tensorflow.keras.layers import \
 #     GlobalAveragePooling1D, GlobalMaxPooling1D
 def do_attention(self, x):
     '''
     Apply at most one attention layer to x (functional-API style),
     selected by self.att_type.
     :param x: sequence tensor of shape (batch, timesteps, features)
     :return: the transformed tensor and the attention layer
         (None if self.att_type matches no branch)
     '''
     att_layer = None
     if self.att_type == 'scaled_dot':
         att_layer = scaled_dot_attention.ScaledDotProductAttention(
             name='Attention')
         x = att_layer(x)
         # the attended output is still a sequence; pool over time to get
         # one fixed-size vector per sample (average pooling is a drop-in
         # alternative: x = GlobalAveragePooling1D()(x))
         x = GlobalMaxPooling1D()(x)
     elif self.att_type == 'seq_self_attention':
         att_layer = seq_self_attention.SeqSelfAttention(
             attention_activation='sigmoid')
         x = att_layer(x)
         # again pool the attended sequence over time
         # (alternative: x = GlobalAveragePooling1D()(x))
         x = GlobalMaxPooling1D()(x)
     elif self.att_type == 'seq_weighted_attention':
         # SeqWeightedAttention reduces over the time axis itself,
         # so no pooling layer is needed here
         att_layer = seq_weighted_attention.SeqWeightedAttention()
         x = att_layer(x)
     elif self.att_type == 'attention_with_context':
         # the many-to-one AttentionWithContext likewise returns a single
         # vector per sample, so no pooling here either
         att_layer = many_to_one_attention_with_context.AttentionWithContext()
         x = att_layer(x)
     return x, att_layer

 def add_attention_layer(self, model):
     '''
     Add the attention layer selected by self.att_type to a Keras
     Sequential-style model; the model is modified in place.
     :param model: a keras Sequential model whose last layer emits a sequence
     :return: None
     '''
     if self.att_type == 'scaled_dot':
         model.add(scaled_dot_attention.ScaledDotProductAttention(name='Attention'))
         # pool the attended sequence down to one vector per sample; note this
         # path uses average pooling where do_attention uses max pooling
         model.add(GlobalAveragePooling1D())
     elif self.att_type == 'seq_self_attention':
         model.add(seq_self_attention.SeqSelfAttention(attention_activation='sigmoid'))
         model.add(GlobalAveragePooling1D())
     elif self.att_type == 'seq_weighted_attention':
         # these last two layers collapse the time axis themselves,
         # so no pooling layer follows
         model.add(seq_weighted_attention.SeqWeightedAttention())
     elif self.att_type == 'attention_with_context':
         model.add(many_to_one_attention_with_context.AttentionWithContext())
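
For context, here is a minimal usage sketch of the two styles. It assumes the methods above belong to some builder class with an att_type attribute; the builder instance, shapes, and surrounding layers are illustrative and not part of the original example.

 # hypothetical: `builder` is an instance of the class defining the methods above
 from tensorflow.keras import Input, Model, Sequential
 from tensorflow.keras.layers import Dense, LSTM

 # functional style: thread the tensor through do_attention()
 inputs = Input(shape=(100, 128))             # (timesteps, features)
 x = LSTM(64, return_sequences=True)(inputs)  # attention expects a full sequence
 x, att_layer = builder.do_attention(x)       # x is now one vector per sample
 model = Model(inputs, Dense(1, activation='sigmoid')(x))

 # sequential style: append the attention block to the model in place
 seq_model = Sequential()
 seq_model.add(LSTM(64, return_sequences=True, input_shape=(100, 128)))
 builder.add_attention_layer(seq_model)
 seq_model.add(Dense(1, activation='sigmoid'))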