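# The snippets below are variants of OpenNMT-py's transformer decoder layer
# (onmt/decoders/transformer.py) collected from different versions and forks.
# A likely import preamble for them (an assumption, not part of the original
# snippets; `MLP` and `MultiHeadedCausalAttention` are fork-specific and are
# only sketched or left undefined here):
import torch
import torch.nn as nn
from torch.nn import Parameter

from onmt.modules import AverageAttention, MultiHeadedAttention
from onmt.modules.position_ffn import (ActivationFunction,
                                       PositionwiseFeedForward)

MAX_SIZE = 5000  # mask size cap used by older OpenNMT-py decoder layers
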
def __init__(self, d_model, heads, d_ff, dropout,
             self_attn_type="scaled-dot"):
    super(TransformerDecoderLayer, self).__init__()

    if self_attn_type == "scaled-dot":
        self.self_attn = MultiHeadedAttention(
            heads, d_model, dropout=dropout)
    elif self_attn_type == "average":
        self.self_attn = AverageAttention(d_model, dropout=dropout)

    self.context_attn = MultiHeadedAttention(
        heads, d_model, dropout=dropout)
    self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
    self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
    self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
    self.drop = nn.Dropout(dropout)
    mask = self._get_attn_subsequent_mask(MAX_SIZE)
    # Register self.mask as a buffer in TransformerDecoderLayer, so
    # it gets TransformerDecoderLayer's cuda behavior automatically.
    self.register_buffer('mask', mask)

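# `_get_attn_subsequent_mask` is defined elsewhere in the original module;
# a minimal torch-only sketch of what it produces (a uint8 mask with ones
# strictly above the diagonal, shape ``(1, size, size)``; the original is
# a method on the layer class):
def _get_attn_subsequent_mask(size):
    """Mask out subsequent (future) positions for causal self-attention."""
    return torch.ones(1, size, size, dtype=torch.uint8).triu_(1)
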
def __init__(self, d_model, heads, d_ff, dropout, attn_dropout,
             self_attn_type="scaled-dot", max_relative_positions=0,
             ctx_weight_param=False):
    super(TransformerGPTDecoderLayerCtxattn, self).__init__()

    if self_attn_type == "scaled-dot":
        self.self_attn = MultiHeadedAttention(
            heads, d_model, dropout=attn_dropout,
            max_relative_positions=max_relative_positions)
    elif self_attn_type == "average":
        self.self_attn = AverageAttention(d_model, dropout=attn_dropout)

    self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
    self.feed_forward = MLP(d_model, d_model * 4, dropout)
    self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-5)
    self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-5)
    self.context_layer_norm = nn.LayerNorm(d_model, eps=1e-5)
    self.drop = nn.Dropout(dropout)
    if ctx_weight_param:
        print('using ctx_weight_param')
        self.ctx_weight = Parameter(torch.zeros(1))
    self.ctx_weight_param = ctx_weight_param

def __init__(self, d_model, heads, d_ff, dropout, attn_dropout,
             self_attn_type="scaled-dot", max_relative_positions=0):
    super(TransformerGPTUnconditionalDecoderLayer, self).__init__()

    if self_attn_type == "scaled-dot":
        self.self_attn = MultiHeadedAttention(
            heads, d_model, dropout=attn_dropout,
            max_relative_positions=max_relative_positions)
    elif self_attn_type == "average":
        self.self_attn = AverageAttention(d_model, dropout=attn_dropout)

    self.feed_forward = MLP(d_model, d_model * 4, dropout)
    self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-5)
    self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-5)
    self.drop = nn.Dropout(dropout)

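# `MLP` in the two GPT-style layers above is fork-specific and not defined
# in this section. A plausible two-layer feed-forward sketch (an assumption;
# the original fork may place dropout or choose the activation differently,
# though GPT blocks conventionally use GELU and a 4x hidden expansion):
class MLP(nn.Module):
    def __init__(self, d_model, d_hidden, dropout):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(d_model, d_hidden)  # d_model -> 4 * d_model
        self.act = nn.GELU()
        self.fc2 = nn.Linear(d_hidden, d_model)  # project back to d_model
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.dropout(self.fc2(self.act(self.fc1(x))))
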
class TransformerDecoderLayer(nn.Module):
    """
    Args:
        d_model (int): the dimension of keys/values/queries in
            :class:`MultiHeadedAttention`, also the input size of
            the first layer of the :class:`PositionwiseFeedForward`.
        heads (int): the number of heads for MultiHeadedAttention.
        d_ff (int): the hidden size of the :class:`PositionwiseFeedForward`.
        dropout (float): dropout probability.
        self_attn_type (string): type of self-attention, "scaled-dot" or
            "average"
    """

    def __init__(self, d_model, heads, d_ff, dropout,
                 self_attn_type="scaled-dot", max_relative_positions=0):
        super(TransformerDecoderLayer, self).__init__()

        if self_attn_type == "scaled-dot":
            self.self_attn = MultiHeadedAttention(
                heads, d_model, dropout=dropout,
                max_relative_positions=max_relative_positions)
        elif self_attn_type == "average":
            self.self_attn = AverageAttention(d_model, dropout=dropout)

        self.context_attn = MultiHeadedAttention(
            heads, d_model, dropout=dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)

    def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                layer_cache=None, step=None):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, 1, model_dim)``
            memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
            src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
            tgt_pad_mask (LongTensor): ``(batch_size, 1, 1)``

        Returns:
            (FloatTensor, FloatTensor, FloatTensor):

            * output ``(batch_size, 1, model_dim)``
            * attn ``(batch_size, 1, src_len)``
            * context ``(batch_size, 1, model_dim)``
        """
        dec_mask = None
        if step is None:
            tgt_len = tgt_pad_mask.size(-1)
            future_mask = torch.ones(
                [tgt_len, tgt_len],
                device=tgt_pad_mask.device,
                dtype=torch.uint8)
            future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
            dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)

        input_norm = self.layer_norm_1(inputs)

        if isinstance(self.self_attn, MultiHeadedAttention):
            query, attn = self.self_attn(input_norm, input_norm, input_norm,
                                         mask=dec_mask,
                                         layer_cache=layer_cache,
                                         attn_type="self")
        elif isinstance(self.self_attn, AverageAttention):
            query, attn = self.self_attn(input_norm, mask=dec_mask,
                                         layer_cache=layer_cache, step=step)

        query = self.drop(query) + inputs

        query_norm = self.layer_norm_2(query)
        context, attn = self.context_attn(memory_bank, memory_bank,
                                          query_norm,
                                          mask=src_pad_mask,
                                          layer_cache=layer_cache,
                                          attn_type="context")
        output = self.feed_forward(self.drop(context) + query)

        return output, attn, context

    def update_dropout(self, dropout):
        self.self_attn.update_dropout(dropout)
        self.context_attn.update_dropout(dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout

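# A minimal smoke test for the layer above (a sketch; mask shapes follow the
# docstring, with zeros meaning "not padding" in OpenNMT-py's convention):
def _demo_decoder_layer():
    batch, src_len, tgt_len, d_model = 2, 7, 5, 16
    layer = TransformerDecoderLayer(d_model, heads=4, d_ff=32, dropout=0.1)
    inputs = torch.rand(batch, tgt_len, d_model)
    memory_bank = torch.rand(batch, src_len, d_model)
    src_pad_mask = torch.zeros(batch, 1, src_len, dtype=torch.uint8)
    tgt_pad_mask = torch.zeros(batch, 1, tgt_len, dtype=torch.uint8)
    # step=None triggers the full (B, T, T) causal mask construction
    output, attn, context = layer(
        inputs, memory_bank, src_pad_mask, tgt_pad_mask)
    assert output.shape == (batch, tgt_len, d_model)
    assert context.shape == (batch, tgt_len, d_model)
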
class TransformerDecoderLayer(nn.Module):
    """
    Args:
        d_model (int): the dimension of keys/values/queries in
            :class:`MultiHeadedAttention`, also the input size of
            the first layer of the :class:`PositionwiseFeedForward`.
        heads (int): the number of heads for MultiHeadedAttention.
        d_ff (int): the hidden size of the :class:`PositionwiseFeedForward`.
        dropout (float): dropout probability.
        self_attn_type (string): type of self-attention, "scaled-dot" or
            "average"
    """

    def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
                 self_attn_type="scaled-dot", max_relative_positions=0,
                 aan_useffn=False, full_context_alignment=False,
                 alignment_heads=None):
        super(TransformerDecoderLayer, self).__init__()

        if self_attn_type == "scaled-dot":
            self.self_attn = MultiHeadedAttention(
                heads, d_model, dropout=dropout,
                max_relative_positions=max_relative_positions)
        elif self_attn_type == "average":
            self.self_attn = AverageAttention(d_model,
                                              dropout=attention_dropout,
                                              aan_useffn=aan_useffn)

        self.context_attn = MultiHeadedAttention(heads, d_model,
                                                 dropout=attention_dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)
        self.full_context_alignment = full_context_alignment
        self.alignment_heads = alignment_heads

    def forward(self, *args, **kwargs):
        """Extend `_forward` for (possibly) multiple decoder passes:

        1. Always a default (future masked) decoder forward pass.
        2. Possibly a second future-aware decoder pass for jointly learning
           full-context alignment.

        Args:
            * All arguments of _forward.
            with_align (bool): whether to return alignment attention.

        Returns:
            (FloatTensor, FloatTensor, FloatTensor or None):

            * output ``(batch_size, T, model_dim)``
            * top_attn ``(batch_size, T, src_len)``
            * attn_align ``(batch_size, T, src_len)`` or None
        """
        with_align = kwargs.pop('with_align', False)
        output, attns = self._forward(*args, **kwargs)
        top_attn = attns[:, 0, :, :].contiguous()
        attn_align = None
        if with_align:
            if self.full_context_alignment:
                # return _, (B, Q_len, K_len)
                _, attns = self._forward(*args, **kwargs, future=True)

            if self.alignment_heads is not None:
                attns = attns[:, :self.alignment_heads, :, :].contiguous()
            # layer average attention across heads, get ``(B, Q, K)``
            # Case 1: no full_context, no align heads -> layer avg baseline
            # Case 2: no full_context, 1 align head  -> guided align
            # Case 3: full_context, 1 align head     -> full ctx guided align
            attn_align = attns.mean(dim=1)
        return output, top_attn, attn_align

    def _forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                 layer_cache=None, step=None, future=False):
        """A naive forward pass for transformer decoder.

        # T: could be 1 in the case of stepwise decoding or tgt_len

        Args:
            inputs (FloatTensor): ``(batch_size, T, model_dim)``
            memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
            src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
            tgt_pad_mask (LongTensor): ``(batch_size, 1, T)``

        Returns:
            (FloatTensor, FloatTensor):

            * output ``(batch_size, T, model_dim)``
            * attns ``(batch_size, head, T, src_len)``
        """
        dec_mask = None
        if step is None:
            tgt_len = tgt_pad_mask.size(-1)
            if not future:  # apply future_mask, result mask in (B, T, T)
                future_mask = torch.ones([tgt_len, tgt_len],
                                         device=tgt_pad_mask.device,
                                         dtype=torch.uint8)
                future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
                # BoolTensor was introduced in pytorch 1.2
                try:
                    future_mask = future_mask.bool()
                except AttributeError:
                    pass
                dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)
            else:  # only mask padding, result mask in (B, 1, T)
                dec_mask = tgt_pad_mask

        input_norm = self.layer_norm_1(inputs)

        if isinstance(self.self_attn, MultiHeadedAttention):
            query, _ = self.self_attn(input_norm, input_norm, input_norm,
                                      mask=dec_mask,
                                      layer_cache=layer_cache,
                                      attn_type="self")
        elif isinstance(self.self_attn, AverageAttention):
            query, _ = self.self_attn(input_norm, mask=dec_mask,
                                      layer_cache=layer_cache, step=step)
        elif isinstance(self.self_attn, MultiHeadedCausalAttention):
            query, _ = self.self_attn(input_norm, input_norm, input_norm,
                                      mask=dec_mask,
                                      layer_cache=layer_cache,
                                      attn_type="self",
                                      decoder=True)

        query = self.drop(query) + inputs

        query_norm = self.layer_norm_2(query)
        mid, attns = self.context_attn(memory_bank, memory_bank, query_norm,
                                       mask=src_pad_mask,
                                       layer_cache=layer_cache,
                                       attn_type="context")
        output = self.feed_forward(self.drop(mid) + query)

        return output, attns

    def update_dropout(self, dropout, attention_dropout):
        self.self_attn.update_dropout(attention_dropout)
        self.context_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout

class TransformerDecoderLayerBase(nn.Module):
    def __init__(
        self,
        d_model,
        heads,
        d_ff,
        dropout,
        attention_dropout,
        self_attn_type="scaled-dot",
        max_relative_positions=0,
        aan_useffn=False,
        full_context_alignment=False,
        alignment_heads=0,
        pos_ffn_activation_fn=ActivationFunction.relu,
    ):
        """
        Args:
            d_model (int): the dimension of keys/values/queries in
                :class:`MultiHeadedAttention`, also the input size of
                the first layer of the :class:`PositionwiseFeedForward`.
            heads (int): the number of heads for MultiHeadedAttention.
            d_ff (int): the hidden size of the
                :class:`PositionwiseFeedForward`.
            dropout (float): dropout in residual, self-attn(dot) and
                feed-forward
            attention_dropout (float): dropout in context_attn (and
                self-attn(avg))
            self_attn_type (string): type of self-attention, "scaled-dot"
                or "average"
            max_relative_positions (int):
                Max distance between inputs in relative positions
                representations
            aan_useffn (bool): Turn on the FFN layer in the AAN decoder
            full_context_alignment (bool):
                whether to enable an extra full context decoder forward for
                alignment
            alignment_heads (int):
                number of cross-attention heads to use for alignment guiding
            pos_ffn_activation_fn (ActivationFunction):
                activation function choice for PositionwiseFeedForward layer
        """
        super(TransformerDecoderLayerBase, self).__init__()

        if self_attn_type == "scaled-dot":
            self.self_attn = MultiHeadedAttention(
                heads,
                d_model,
                dropout=attention_dropout,
                max_relative_positions=max_relative_positions,
            )
        elif self_attn_type == "average":
            self.self_attn = AverageAttention(
                d_model, dropout=attention_dropout, aan_useffn=aan_useffn
            )

        self.feed_forward = PositionwiseFeedForward(
            d_model, d_ff, dropout, pos_ffn_activation_fn
        )
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)
        self.full_context_alignment = full_context_alignment
        self.alignment_heads = alignment_heads

    def forward(self, *args, **kwargs):
        """Extend `_forward` for (possibly) multiple decoder passes:

        Always a default (future masked) decoder forward pass,
        Possibly a second future-aware decoder pass for jointly learning
        full-context alignment, :cite:`garg2019jointly`.

        Args:
            * All arguments of _forward.
            with_align (bool): whether to return alignment attention.

        Returns:
            (FloatTensor, FloatTensor, FloatTensor or None):

            * output ``(batch_size, T, model_dim)``
            * top_attn ``(batch_size, T, src_len)``
            * attn_align ``(batch_size, T, src_len)`` or None
        """
        with_align = kwargs.pop("with_align", False)
        output, attns = self._forward(*args, **kwargs)
        top_attn = attns[:, 0, :, :].contiguous()
        attn_align = None
        if with_align:
            if self.full_context_alignment:
                # return _, (B, Q_len, K_len)
                _, attns = self._forward(*args, **kwargs, future=True)

            if self.alignment_heads > 0:
                attns = attns[:, :self.alignment_heads, :, :].contiguous()
            # layer average attention across heads, get ``(B, Q, K)``
            # Case 1: no full_context, no align heads -> layer avg baseline
            # Case 2: no full_context, 1 align head  -> guided align
            # Case 3: full_context, 1 align head     -> full ctx guided align
            attn_align = attns.mean(dim=1)
        return output, top_attn, attn_align

    def update_dropout(self, dropout, attention_dropout):
        self.self_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout

    def _forward(self, *args, **kwargs):
        raise NotImplementedError

    def _compute_dec_mask(self, tgt_pad_mask, future):
        tgt_len = tgt_pad_mask.size(-1)
        if not future:  # apply future_mask, result mask in (B, T, T)
            future_mask = torch.ones(
                [tgt_len, tgt_len],
                device=tgt_pad_mask.device,
                dtype=torch.uint8,
            )
            future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
            # BoolTensor was introduced in pytorch 1.2
            try:
                future_mask = future_mask.bool()
            except AttributeError:
                pass
            dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)
        else:  # only mask padding, result mask in (B, 1, T)
            dec_mask = tgt_pad_mask
        return dec_mask

    def _forward_self_attn(self, inputs_norm, dec_mask, layer_cache, step):
        if isinstance(self.self_attn, MultiHeadedAttention):
            return self.self_attn(
                inputs_norm,
                inputs_norm,
                inputs_norm,
                mask=dec_mask,
                layer_cache=layer_cache,
                attn_type="self",
            )
        elif isinstance(self.self_attn, AverageAttention):
            return self.self_attn(
                inputs_norm, mask=dec_mask, layer_cache=layer_cache, step=step
            )
        else:
            raise ValueError(
                f"self attention {type(self.self_attn)} not supported"
            )

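# The base class leaves `_forward` abstract. A minimal decoder-only subclass
# sketch built from the `_compute_dec_mask`/`_forward_self_attn` helpers
# (hypothetical, for illustration only; note that OpenNMT-py's
# PositionwiseFeedForward applies its own LayerNorm and residual internally):
class _MinimalLMLayer(TransformerDecoderLayerBase):
    def _forward(self, inputs, tgt_pad_mask, layer_cache=None, step=None,
                 future=False):
        dec_mask = None
        if step is None:
            # full-sequence pass: build the (B, T, T) causal/padding mask
            dec_mask = self._compute_dec_mask(tgt_pad_mask, future)
        inputs_norm = self.layer_norm_1(inputs)
        query, attns = self._forward_self_attn(
            inputs_norm, dec_mask, layer_cache, step)
        output = self.feed_forward(self.drop(query) + inputs)
        return output, attns
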
class TransformerDecoderLayer(nn.Module):
    """Transformer Decoder layer block in Pre-Norm style.

    Pre-Norm style is an improvement w.r.t. the original paper's Post-Norm
    style, providing better convergence speed and performance. This is also
    the actual implementation in tensor2tensor and is also available in
    fairseq.
    See https://tunz.kr/post/4 and :cite:`DeeperTransformer`.

    .. mermaid::

        graph LR
        %% "*SubLayer" can be self-attn, src-attn or feed forward block
            A(input) --> B[Norm]
            B --> C["*SubLayer"]
            C --> D[Drop]
            D --> E((+))
            A --> E
            E --> F(out)

    Args:
        d_model (int): the dimension of keys/values/queries in
            :class:`MultiHeadedAttention`, also the input size of
            the first layer of the :class:`PositionwiseFeedForward`.
        heads (int): the number of heads for MultiHeadedAttention.
        d_ff (int): the hidden size of the :class:`PositionwiseFeedForward`.
        dropout (float): dropout in residual, self-attn(dot) and feed-forward
        attention_dropout (float): dropout in context_attn (and
            self-attn(avg))
        self_attn_type (string): type of self-attention, "scaled-dot" or
            "average"
        max_relative_positions (int):
            Max distance between inputs in relative positions representations
        aan_useffn (bool): Turn on the FFN layer in the AAN decoder
        full_context_alignment (bool):
            whether to enable an extra full context decoder forward for
            alignment
        alignment_heads (int):
            number of cross-attention heads to use for alignment guiding
    """

    def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
                 self_attn_type="scaled-dot", max_relative_positions=0,
                 aan_useffn=False, full_context_alignment=False,
                 alignment_heads=0):
        super(TransformerDecoderLayer, self).__init__()

        if self_attn_type == "scaled-dot":
            self.self_attn = MultiHeadedAttention(
                heads, d_model, dropout=attention_dropout,
                max_relative_positions=max_relative_positions)
        elif self_attn_type == "average":
            self.self_attn = AverageAttention(d_model,
                                              dropout=attention_dropout,
                                              aan_useffn=aan_useffn)

        self.context_attn = MultiHeadedAttention(heads, d_model,
                                                 dropout=attention_dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)
        self.full_context_alignment = full_context_alignment
        self.alignment_heads = alignment_heads

    def forward(self, *args, **kwargs):
        """Extend `_forward` for (possibly) multiple decoder passes:

        Always a default (future masked) decoder forward pass,
        Possibly a second future-aware decoder pass for jointly learning
        full-context alignment, :cite:`garg2019jointly`.

        Args:
            * All arguments of _forward.
            with_align (bool): whether to return alignment attention.

        Returns:
            (FloatTensor, FloatTensor, FloatTensor or None):

            * output ``(batch_size, T, model_dim)``
            * top_attn ``(batch_size, T, src_len)``
            * attn_align ``(batch_size, T, src_len)`` or None
        """
        with_align = kwargs.pop('with_align', False)
        output, attns = self._forward(*args, **kwargs)
        top_attn = attns[:, 0, :, :].contiguous()
        attn_align = None
        if with_align:
            if self.full_context_alignment:
                # return _, (B, Q_len, K_len)
                _, attns = self._forward(*args, **kwargs, future=True)

            if self.alignment_heads > 0:
                attns = attns[:, :self.alignment_heads, :, :].contiguous()
            # layer average attention across heads, get ``(B, Q, K)``
            # Case 1: no full_context, no align heads -> layer avg baseline
            # Case 2: no full_context, 1 align head  -> guided align
            # Case 3: full_context, 1 align head     -> full ctx guided align
            attn_align = attns.mean(dim=1)
        return output, top_attn, attn_align

    def _forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                 layer_cache=None, step=None, future=False):
        """A naive forward pass for transformer decoder.

        # T: could be 1 in the case of stepwise decoding or tgt_len

        Args:
            inputs (FloatTensor): ``(batch_size, T, model_dim)``
            memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
            src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
            tgt_pad_mask (LongTensor): ``(batch_size, 1, T)``
            layer_cache (dict or None): cached layer info when stepwise decode
            step (int or None): stepwise decoding counter
            future (bool): If set True, do not apply future_mask.

        Returns:
            (FloatTensor, FloatTensor):

            * output ``(batch_size, T, model_dim)``
            * attns ``(batch_size, head, T, src_len)``
        """
        dec_mask = None
        if step is None:
            tgt_len = tgt_pad_mask.size(-1)
            if not future:  # apply future_mask, result mask in (B, T, T)
                future_mask = torch.ones([tgt_len, tgt_len],
                                         device=tgt_pad_mask.device,
                                         dtype=torch.uint8)
                future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
                # BoolTensor was introduced in pytorch 1.2
                try:
                    future_mask = future_mask.bool()
                except AttributeError:
                    pass
                dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)
            else:  # only mask padding, result mask in (B, 1, T)
                dec_mask = tgt_pad_mask

        input_norm = self.layer_norm_1(inputs)

        if isinstance(self.self_attn, MultiHeadedAttention):
            query, _ = self.self_attn(input_norm, input_norm, input_norm,
                                      mask=dec_mask,
                                      layer_cache=layer_cache,
                                      attn_type="self")
        elif isinstance(self.self_attn, AverageAttention):
            query, _ = self.self_attn(input_norm, mask=dec_mask,
                                      layer_cache=layer_cache, step=step)

        query = self.drop(query) + inputs

        query_norm = self.layer_norm_2(query)
        mid, attns = self.context_attn(memory_bank, memory_bank, query_norm,
                                       mask=src_pad_mask,
                                       layer_cache=layer_cache,
                                       attn_type="context")
        output = self.feed_forward(self.drop(mid) + query)

        return output, attns

    def update_dropout(self, dropout, attention_dropout):
        self.self_attn.update_dropout(attention_dropout)
        self.context_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout

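# Sketch of the guided-alignment call path for the layer above (assumed
# call-site usage; `with_align=True` triggers the optional second,
# future-aware pass when `full_context_alignment` is set):
def _demo_alignment():
    batch, src_len, tgt_len, d_model = 2, 7, 5, 16
    layer = TransformerDecoderLayer(
        d_model, heads=4, d_ff=32, dropout=0.1, attention_dropout=0.1,
        full_context_alignment=True, alignment_heads=1)
    inputs = torch.rand(batch, tgt_len, d_model)
    memory_bank = torch.rand(batch, src_len, d_model)
    src_pad_mask = torch.zeros(batch, 1, src_len, dtype=torch.uint8)
    tgt_pad_mask = torch.zeros(batch, 1, tgt_len, dtype=torch.uint8)
    output, top_attn, attn_align = layer(
        inputs, memory_bank, src_pad_mask, tgt_pad_mask, with_align=True)
    # head-averaged cross-attention used as a soft alignment
    assert attn_align.shape == (batch, tgt_len, src_len)
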
class TransformerDecoderLayer(nn.Module):
    """
    Args:
        d_model (int): the dimension of keys/values/queries in
            :class:`MultiHeadedAttention`, also the input size of
            the first layer of the :class:`PositionwiseFeedForward`.
        heads (int): the number of heads for MultiHeadedAttention.
        d_ff (int): the hidden size of the :class:`PositionwiseFeedForward`.
        dropout (float): dropout probability.
        self_attn_type (string): type of self-attention, "scaled-dot" or
            "average"
    """

    def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
                 self_attn_type="scaled-dot", max_relative_positions=0,
                 aan_useffn=False, tgt_concept_words_type=-1):
        super(TransformerDecoderLayer, self).__init__()

        if self_attn_type == "scaled-dot":
            self.self_attn = MultiHeadedAttention(
                heads, d_model, dropout=dropout,
                max_relative_positions=max_relative_positions)
        elif self_attn_type == "average":
            self.self_attn = AverageAttention(d_model,
                                              dropout=attention_dropout,
                                              aan_useffn=aan_useffn)

        self.context_attn = MultiHeadedAttention(heads, d_model,
                                                 dropout=attention_dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)
        self.tgt_concept_words_type = tgt_concept_words_type
        if tgt_concept_words_type in [2]:
            self.tgt_concept_mlp = nn.Linear(d_model * 2, d_model)

    def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                layer_cache=None, step=None, tgt_concept_words_emb=None,
                tgt_concept_words_type=-1):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, 1, model_dim)``
            memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
            src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
            tgt_pad_mask (LongTensor): ``(batch_size, 1, 1)``

        Returns:
            (FloatTensor, FloatTensor):

            * output ``(batch_size, 1, model_dim)``
            * attn ``(batch_size, 1, src_len)``
        """
        dec_mask = None
        if step is None:
            tgt_len = tgt_pad_mask.size(-1)
            future_mask = torch.ones([tgt_len, tgt_len],
                                     device=tgt_pad_mask.device,
                                     dtype=torch.uint8)
            future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
            # BoolTensor was introduced in pytorch 1.2
            try:
                future_mask = future_mask.bool()
            except AttributeError:
                pass
            dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)

        input_norm = self.layer_norm_1(inputs)

        if isinstance(self.self_attn, MultiHeadedAttention):
            query, attn = self.self_attn(input_norm, input_norm, input_norm,
                                         mask=dec_mask,
                                         layer_cache=layer_cache,
                                         attn_type="self")
        elif isinstance(self.self_attn, AverageAttention):
            query, attn = self.self_attn(input_norm, mask=dec_mask,
                                         layer_cache=layer_cache, step=step)

        query = self.drop(query) + inputs

        # ablation: fuse target concept-word embeddings into the decoder
        # state before cross-attention
        if tgt_concept_words_emb is not None:
            # print(query.shape, tgt_concept_words_emb.shape)
            if self.tgt_concept_words_type == 2:
                query = self.tgt_concept_mlp(
                    torch.cat([query, tgt_concept_words_emb], dim=2))
            if self.tgt_concept_words_type == 3:
                query = (query + tgt_concept_words_emb) / 2

        query_norm = self.layer_norm_2(query)
        mid, attn = self.context_attn(memory_bank, memory_bank, query_norm,
                                      mask=src_pad_mask,
                                      layer_cache=layer_cache,
                                      attn_type="context")
        output = self.feed_forward(self.drop(mid) + query)

        return output, attn

    def update_dropout(self, dropout, attention_dropout):
        self.self_attn.update_dropout(attention_dropout)
        self.context_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout

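# Sketch of the concept-fusion modes above (assumed usage; the concept
# embedding must match the decoder state shape ``(B, T, d_model)``).
# Type 2 is a learned fusion, query = MLP([query; emb]); type 3 would
# instead average, query = (query + emb) / 2:
def _demo_concept_fusion():
    batch, src_len, tgt_len, d_model = 2, 7, 5, 16
    layer = TransformerDecoderLayer(
        d_model, heads=4, d_ff=32, dropout=0.1, attention_dropout=0.1,
        tgt_concept_words_type=2)
    inputs = torch.rand(batch, tgt_len, d_model)
    memory_bank = torch.rand(batch, src_len, d_model)
    src_pad_mask = torch.zeros(batch, 1, src_len, dtype=torch.uint8)
    tgt_pad_mask = torch.zeros(batch, 1, tgt_len, dtype=torch.uint8)
    emb = torch.rand(batch, tgt_len, d_model)
    output, attn = layer(inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                         tgt_concept_words_emb=emb)
    assert output.shape == (batch, tgt_len, d_model)
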
class TransformerDecoderLayer(nn.Module):
    """
    Args:
        d_model (int): the dimension of keys/values/queries in
            :class:`MultiHeadedAttention`, also the input size of
            the first layer of the :class:`PositionwiseFeedForward`.
        heads (int): the number of heads for MultiHeadedAttention.
        d_ff (int): the hidden size of the :class:`PositionwiseFeedForward`.
        dropout (float): dropout probability.
        self_attn_type (string): type of self-attention, "scaled-dot" or
            "average"
    """

    def __init__(self, opt, d_model, heads, d_ff, dropout, attention_dropout,
                 self_attn_type="scaled-dot", max_relative_positions=0,
                 aan_useffn=False, dict_size=None, label_emb=None):
        super(TransformerDecoderLayer, self).__init__()

        if self_attn_type == "scaled-dot":
            # this fork's MultiHeadedAttention takes extra label/vocabulary
            # arguments and returns syntactic attention info (see forward)
            self.self_attn = MultiHeadedAttention(
                heads, d_model, dropout=dropout,
                max_relative_positions=max_relative_positions,
                dict_size=dict_size, label_emb=label_emb, opt=opt)
        elif self_attn_type == "average":
            self.self_attn = AverageAttention(d_model,
                                              dropout=attention_dropout,
                                              aan_useffn=aan_useffn)

        self.context_attn = MultiHeadedAttention(heads, d_model,
                                                 dropout=attention_dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)

    def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                layer_cache=None, step=None, gold_par_attn=None,
                gold_ch_attn=None):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, 1, model_dim)``
            memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
            src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
            tgt_pad_mask (LongTensor): ``(batch_size, 1, 1)``

        Returns:
            (output, src_attn, tgt_attn, second_attn, dec_mask,
             ch_labels, par_labels):

            * output ``(batch_size, 1, model_dim)``
            * src_attn ``(batch_size, 1, src_len)``
            * the remaining values are produced by the modified
              self-attention (see below)
        """
        dec_mask = None
        if step is None:
            tgt_len = tgt_pad_mask.size(-1)
            future_mask = torch.ones([tgt_len, tgt_len],
                                     device=tgt_pad_mask.device,
                                     dtype=torch.uint8)
            future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
            # future_mask = future_mask.triu_(0).view(1, tgt_len, tgt_len)
            # future_mask[0, 0, 0] = 0
            # BoolTensor was introduced in pytorch 1.2
            try:
                future_mask = future_mask.bool()
            except AttributeError:
                pass
            dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)
        # elif step != 0 and synsa:
        #     self_mask = torch.zeros(
        #         [1, 1, step + 1],
        #         device=tgt_pad_mask.device,
        #         dtype=torch.uint8)
        #     self_mask[:, :, -1] = 1
        #     try:
        #         self_mask = self_mask.bool()
        #     except AttributeError:
        #         pass
        #     dec_mask = torch.gt(self_mask, 0)

        input_norm = self.layer_norm_1(inputs)

        if isinstance(self.self_attn, MultiHeadedAttention):
            # the modified self-attention returns a 5-tuple:
            # (output, target attention, second-pass attention,
            #  child labels, parent labels)
            query, tgt_attn, second_attn, ch_labels, par_labels = \
                self.self_attn(input_norm, input_norm, input_norm,
                               mask=dec_mask,
                               layer_cache=layer_cache,
                               attn_type="self",
                               gold_par_attn=gold_par_attn,
                               gold_ch_attn=gold_ch_attn)
        elif isinstance(self.self_attn, AverageAttention):
            query, attn = self.self_attn(input_norm, mask=dec_mask,
                                         layer_cache=layer_cache, step=step)

        query = self.drop(query) + inputs

        query_norm = self.layer_norm_2(query)
        mid, src_attn, _, _, _ = self.context_attn(memory_bank, memory_bank,
                                                   query_norm,
                                                   mask=src_pad_mask,
                                                   layer_cache=layer_cache,
                                                   attn_type="context")
        output = self.feed_forward(self.drop(mid) + query)

        return (output, src_attn, tgt_attn, second_attn, dec_mask,
                ch_labels, par_labels)

    def update_dropout(self, dropout, attention_dropout):
        self.self_attn.update_dropout(attention_dropout)
        self.context_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout

class TransformerLMDecoderLayer(TransformerDecoderLayerBase):
    """Transformer Decoder only layer block in GPT style.

    .. mermaid::

        graph LR
        %% "*SubLayer" can be self-attn, src-attn or feed forward block
            A(input) --> B[Norm]
            B --> C["*SubLayer"]
            C --> D[Drop]
            D --> E((+))
            A --> E
            E --> F(out)

    Args:
        d_model (int): the dimension of keys/values/queries in
            :class:`MultiHeadedAttention`, also the input size of
            the first layer of the :class:`PositionwiseFeedForward`.
        heads (int): the number of heads for MultiHeadedAttention.
        d_ff (int): the hidden size of the :class:`PositionwiseFeedForward`.
        dropout (float): dropout in residual, self-attn(dot) and feed-forward
        attention_dropout (float): dropout in self-attn
        self_attn_type (string): type of self-attention, "scaled-dot" or
            "average"
        max_relative_positions (int):
            Max distance between inputs in relative positions representations
        aan_useffn (bool): Turn on the FFN layer in the AAN decoder
        full_context_alignment (bool):
            whether to enable an extra full context decoder forward for
            alignment
        alignment_heads (int):
            number of cross-attention heads to use for alignment guiding
    """

    def __init__(
        self,
        d_model,
        heads,
        d_ff,
        dropout,
        attention_dropout,
        self_attn_type="scaled-dot",
        max_relative_positions=0,
        aan_useffn=False,
        full_context_alignment=False,
        alignment_heads=0,
    ):
        # the base class builds self_attn, feed_forward, layer_norm_1 and
        # drop; it must receive the constructor arguments, otherwise this
        # call raises a TypeError
        super(TransformerLMDecoderLayer, self).__init__(
            d_model,
            heads,
            d_ff,
            dropout,
            attention_dropout,
            self_attn_type=self_attn_type,
            max_relative_positions=max_relative_positions,
            aan_useffn=aan_useffn,
            full_context_alignment=full_context_alignment,
            alignment_heads=alignment_heads,
        )
        # extra norm for this layer's explicit FFN residual (see _forward)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)

    def _forward(
        self, inputs, tgt_pad_mask, layer_cache=None, step=None, future=False
    ):
        """A naive forward pass for transformer decoder.

        # T: could be 1 in the case of stepwise decoding or tgt_len

        Args:
            inputs (FloatTensor): ``(batch_size, T, model_dim)``
            tgt_pad_mask (bool): ``(batch_size, 1, T)``
            layer_cache (dict or None): cached layer info when stepwise decode
            step (int or None): stepwise decoding counter
            future (bool): If set True, do not apply future_mask.

        Returns:
            (FloatTensor, FloatTensor):

            * output ``(batch_size, T, model_dim)``
            * attns ``(batch_size, head, T, T)``
        """
        dec_mask = None
        if step is None:
            tgt_len = tgt_pad_mask.size(-1)
            if not future:  # apply future_mask, result mask in (B, T, T)
                future_mask = torch.ones(
                    [tgt_len, tgt_len],
                    device=tgt_pad_mask.device,
                    dtype=torch.uint8,
                )
                future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
                # BoolTensor was introduced in pytorch 1.2
                try:
                    future_mask = future_mask.bool()
                except AttributeError:
                    pass
                dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)
            else:  # only mask padding, result mask in (B, 1, T)
                dec_mask = tgt_pad_mask

        inputs_norm = self.layer_norm_1(inputs)

        if isinstance(self.self_attn, MultiHeadedAttention):
            query, attns = self.self_attn(
                inputs_norm,
                inputs_norm,
                inputs_norm,
                mask=dec_mask,
                layer_cache=layer_cache,
                attn_type="self",
            )
        elif isinstance(self.self_attn, AverageAttention):
            query, attns = self.self_attn(
                inputs_norm, mask=dec_mask, layer_cache=layer_cache, step=step
            )

        output = self.drop(query) + inputs
        output_feedforward = self.feed_forward(self.layer_norm_2(output))
        output_norm = self.drop(output_feedforward) + output

        return output_norm, attns

    def update_dropout(self, dropout, attention_dropout):
        self.self_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout

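# Stepwise decoding sketch for the LM layer above (an assumption: the cache
# dict keys follow OpenNMT-py's MultiHeadedAttention convention of
# "self_keys"/"self_values"; a real decoder would initialize the cache per
# layer and feed token embeddings rather than the raw layer output):
def _demo_stepwise_lm():
    batch, d_model = 2, 16
    layer = TransformerLMDecoderLayer(
        d_model, heads=4, d_ff=32, dropout=0.1, attention_dropout=0.1)
    layer_cache = {"self_keys": None, "self_values": None}
    tok = torch.rand(batch, 1, d_model)
    tgt_pad_mask = torch.zeros(batch, 1, 1, dtype=torch.bool)
    for step in range(3):
        # a non-None step skips mask construction; the cache grows each step
        out, top_attn, _ = layer(tok, tgt_pad_mask,
                                 layer_cache=layer_cache, step=step)
        tok = out
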