def foo(a, b, c=None):
    """A function with multiple arguments and an optional argument"""
    if any(type(t) is not Tensor for t in (a, b, c)) and has_torch_function((a, b, c)):
        return handle_torch_function(foo, (a, b, c), a, b, c=c)
    if c:
        return a + b + c
    return a + b
def unsupported_jit_cat(tensors, dim):
    if any(type(t) is not Tensor for t in tensors) and has_torch_function(tensors):
        return handle_torch_function(cat, tensors, tensors=tensors, dim=dim)
def fn(tensors, dim=0, out=None):
    if not torch.jit.is_scripting():
        if any(type(t) is not Tensor for t in tensors) and has_torch_function(tensors):
            return handle_torch_function(fn, tensors, tensors, dim=dim, out=out)
    return original_fn(tensors, dim=dim, out=out)
def unsupported_jit_cat(tensors, dim):
    if not isinstance(tensors, (tuple, list)):
        tensors = tuple(tensors)
        return unsupported_jit_cat(tensors, dim)
    if any(type(t) is not Tensor for t in tensors) and has_torch_function(tensors):
        return handle_torch_function(original_cat, relevant_args=tensors, tensors=tensors, dim=dim)
    else:
        return original_cat(tensors=tensors, dim=dim)
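# Minimal usage sketch for the cat wrappers above. The module-level binding
# `original_cat = torch.cat` is an assumption for illustration; it is implied
# by the wrapper but not shown in the snippets themselves.
import torch

original_cat = torch.cat   # assumed binding the wrapper falls back to

x, y = torch.ones(2), torch.zeros(2)
# Plain tensors fail the `type(t) is not Tensor` check, so the wrapper falls
# straight through to original_cat instead of handle_torch_function.
assert unsupported_jit_cat((x, y), 0).shape == (4,)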
def multi_head_attention_forward(self,
                                 query,                           # type: Tensor
                                 key,                             # type: Tensor
                                 value,                           # type: Tensor
                                 embed_dim_to_check,              # type: int
                                 num_heads,                       # type: int
                                 in_proj_weight,                  # type: Tensor
                                 in_proj_bias,                    # type: Tensor
                                 bias_k,                          # type: Optional[Tensor]
                                 bias_v,                          # type: Optional[Tensor]
                                 add_zero_attn,                   # type: bool
                                 dropout_p,                       # type: float
                                 out_proj_weight,                 # type: Tensor
                                 out_proj_bias,                   # type: Tensor
                                 fixed_k=None,                    # type: Optional[Tensor]
                                 fixed_q=None,                    # type: Optional[Tensor]
                                 training=True,                   # type: bool
                                 key_padding_mask=None,           # type: Optional[Tensor]
                                 need_weights=True,               # type: bool
                                 attn_mask=None,                  # type: Optional[Tensor]
                                 use_separate_proj_weight=False,  # type: bool
                                 q_proj_weight=None,              # type: Optional[Tensor]
                                 k_proj_weight=None,              # type: Optional[Tensor]
                                 v_proj_weight=None,              # type: Optional[Tensor]
                                 static_k=None,                   # type: Optional[Tensor]
                                 static_v=None                    # type: Optional[Tensor]
                                 ):
    # type: (...) -> Tuple[Tensor, Optional[Tensor]]
    r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        add_zero_attn: add a new batch of zeros to the key and value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be
            broadcasted for all the batches while a 3D mask allows to specify a different mask for
            the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.

    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
          will be unchanged. If a BoolTensor is provided, the positions with the value of ``True`` will
          be ignored while the position with the value of ``False`` will be unchanged.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source
          sequence length. 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the
          target sequence length, S is the source sequence length. attn_mask ensures that position i
          is allowed to attend the unmasked positions. If a ByteTensor is provided, the non-zero
          positions are not allowed to attend while the zero positions will be unchanged. If a
          BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False``
          values will be unchanged. If a FloatTensor is provided, it will be added to the attention
          weight.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.

        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size, L is the target sequence
          length, S is the source sequence length.
    """
    try_import_torch()
    import torch
    from torch._overrides import has_torch_function, handle_torch_function
    from torch.nn.functional import linear, softmax, dropout

    if not torch.jit.is_scripting():
        tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k,
                    bias_v, out_proj_weight, out_proj_bias)
        if any(type(t) is not torch.Tensor for t in tens_ops) and has_torch_function(tens_ops):
            return handle_torch_function(
                self.multi_head_attention_forward, tens_ops, query, key, value,
                embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
                bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
                out_proj_bias, training=training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=use_separate_proj_weight,
                q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
                v_proj_weight=v_proj_weight, static_k=static_k,
                static_v=static_v)
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    # allow MHA to have different sizes for the feature dimension
    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    scaling = float(head_dim) ** -0.5

    # self-attention: we assume we are in the case key == value == query, so only
    # the value projection uses in_proj_weight; q and k come from the fixed_q and
    # fixed_k tensors broadcast across the batch dimension.
    v = linear(query, in_proj_weight, in_proj_bias)
    k = torch.cat([fixed_k.unsqueeze(1) for _ in range(key.shape[1])], dim=1)
    q = torch.cat([fixed_q.unsqueeze(1) for _ in range(key.shape[1])], dim=1)
    q = q * scaling

    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    src_len = k.size(1)

    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    attn_output_weights = softmax(attn_output_weights, dim=-1)
    attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
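# Hypothetical smoke test for the forward above. The `module` binding is an
# assumption (in the source this function is a method), and module-level helpers
# such as try_import_torch are expected to exist. It exercises only the
# fixed_q / fixed_k self-attention path, where key == value == query and L == S.
import torch

L, N, E, H = 5, 3, 8, 2                 # tgt length, batch, embed dim, heads
query = torch.randn(L, N, E)
in_proj_weight = torch.randn(E, E)      # only the value projection uses it here
in_proj_bias = torch.zeros(E)
out_proj_weight = torch.randn(E, E)
out_proj_bias = torch.zeros(E)
fixed_k = torch.randn(L, E)             # broadcast over the batch dimension
fixed_q = torch.randn(L, E)

attn_output, attn_weights = module.multi_head_attention_forward(
    query, query, query, E, H,
    in_proj_weight, in_proj_bias, None, None, False, 0.0,
    out_proj_weight, out_proj_bias,
    fixed_k=fixed_k, fixed_q=fixed_q, training=False)
assert attn_output.shape == (L, N, E)
assert attn_weights.shape == (N, L, L)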
def quux(a):
    """Used to test that errors raised in user implementations get propagated"""
    if type(a) is not Tensor and has_torch_function((a,)):
        return handle_torch_function(quux, (a,), a)
    return a
def baz(a, b):
    """A function with multiple arguments"""
    if (type(a) is not Tensor or type(b) is not Tensor) and has_torch_function((a, b)):
        return handle_torch_function(baz, (a, b), a, b)
    return a + b
def bar(a):
    """A function with one argument"""
    if type(a) is not Tensor and has_torch_function((a,)):
        return handle_torch_function(bar, (a,), a)
    return a
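# Minimal sketch of the __torch_function__ protocol that foo, bar, baz and quux
# exercise. The classmethod signature with a `types` argument matches
# torch >= 1.7 (torch.overrides); older releases that shipped torch._overrides
# used an instance-method hook without `types`, so treat this as illustrative.
import torch


class Wrapper:
    """Non-Tensor type that participates in dispatch for the functions above."""

    def __init__(self, tensor):
        self.tensor = tensor

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        # Unwrap our objects and re-invoke the overridden function on plain tensors.
        args = [a.tensor if isinstance(a, Wrapper) else a for a in args]
        kwargs = {k: v.tensor if isinstance(v, Wrapper) else v for k, v in kwargs.items()}
        return func(*args, **kwargs)


# bar(Wrapper(torch.ones(3))) is routed through Wrapper.__torch_function__
# instead of bar's Tensor-only body; foo and baz dispatch the same way.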
def biased_multi_head_attention_forward(
        query,                           # type: Tensor
        key,                             # type: Tensor
        value,                           # type: Tensor
        embed_dim_to_check,              # type: int
        num_heads,                       # type: int
        in_proj_weight,                  # type: Tensor
        in_proj_bias,                    # type: Tuple[Tensor]
        bias_k,                          # type: Optional[Tensor]
        bias_v,                          # type: Optional[Tensor]
        kk_bias_r,                       # type: Tensor
        add_zero_attn,                   # type: bool
        dropout_p,                       # type: float
        out_proj_weight,                 # type: Tensor
        out_proj_bias,                   # type: Tensor
        training=True,                   # type: bool
        key_padding_mask=None,           # type: Optional[Tensor]
        need_weights=True,               # type: bool
        attn_mask=None,                  # type: Optional[Tensor]
        use_separate_proj_weight=False,  # type: bool
        q_proj_weight=None,              # type: Optional[Tensor]
        k_proj_weight=None,              # type: Optional[Tensor]
        v_proj_weight=None,              # type: Optional[Tensor]
        static_k=None,                   # type: Optional[Tensor]
        static_v=None                    # type: Optional[Tensor]
        ):
    # type: (...) -> Tuple[Tensor, Optional[Tensor]]
    r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        kk_bias_r: scaling applied to the key-norm bias added to the attention scores.
        add_zero_attn: add a new batch of zeros to the key and value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be
            broadcasted for all the batches while a 3D mask allows to specify a different mask for
            the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.

    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
          will be unchanged. If a BoolTensor is provided, the positions with the value of ``True`` will
          be ignored while the position with the value of ``False`` will be unchanged.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source
          sequence length. 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the
          target sequence length, S is the source sequence length. attn_mask ensures that position i
          is allowed to attend the unmasked positions. If a ByteTensor is provided, the non-zero
          positions are not allowed to attend while the zero positions will be unchanged. If a
          BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False``
          values will be unchanged. If a FloatTensor is provided, it will be added to the attention
          weight.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.

        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size, L is the target sequence
          length, S is the source sequence length.
    """
    if not torch.jit.is_scripting():
        tens_ops = (query, key, value, in_proj_weight, bias_k, bias_v,
                    out_proj_weight, out_proj_bias)
        if any(type(t) is not Tensor for t in tens_ops) and has_torch_function(tens_ops):
            return handle_torch_function(
                biased_multi_head_attention_forward, tens_ops, query, key, value,
                embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
                bias_k, bias_v, kk_bias_r, add_zero_attn, dropout_p,
                out_proj_weight, out_proj_bias, training=training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=use_separate_proj_weight,
                q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
                v_proj_weight=v_proj_weight, static_k=static_k,
                static_v=static_v)
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    assert key.size() == value.size()

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    scaling = float(head_dim) ** -0.5

    if not use_separate_proj_weight:
        raise NotImplementedError()
        # if torch.equal(query, key) and torch.equal(key, value):
        #     # self-attention
        #     q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
        # elif torch.equal(key, value):
        #     # encoder-decoder attention
        #     # This is inline in_proj function with in_proj_weight and in_proj_bias
        #     _b = in_proj_bias
        #     _start = 0
        #     _end = embed_dim
        #     _w = in_proj_weight[_start:_end, :]
        #     if _b is not None:
        #         _b = _b[_start:_end]
        #     q = linear(query, _w, _b)
        #     if key is None:
        #         assert value is None
        #         k = None
        #         v = None
        #     else:
        #         # This is inline in_proj function with in_proj_weight and in_proj_bias
        #         _b = in_proj_bias
        #         _start = embed_dim
        #         _end = None
        #         _w = in_proj_weight[_start:, :]
        #         if _b is not None:
        #             _b = _b[_start:]
        #         k, v = linear(key, _w, _b).chunk(2, dim=-1)
        # else:
        #     # This is inline in_proj function with in_proj_weight and in_proj_bias
        #     _b = in_proj_bias
        #     _start = 0
        #     _end = embed_dim
        #     _w = in_proj_weight[_start:_end, :]
        #     if _b is not None:
        #         _b = _b[_start:_end]
        #     q = linear(query, _w, _b)
        #     # This is inline in_proj function with in_proj_weight and in_proj_bias
        #     _b = in_proj_bias
        #     _start = embed_dim
        #     _end = embed_dim * 2
        #     _w = in_proj_weight[_start:_end, :]
        #     if _b is not None:
        #         _b = _b[_start:_end]
        #     k = linear(key, _w, _b)
        #     # This is inline in_proj function with in_proj_weight and in_proj_bias
        #     _b = in_proj_bias
        #     _start = embed_dim * 2
        #     _end = None
        #     _w = in_proj_weight[_start:, :]
        #     if _b is not None:
        #         _b = _b[_start:]
        #     v = linear(value, _w, _b)
    else:
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0])
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[1])
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[2])
        else:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)

    # (S, N, 1) -> (N * num_heads, 1, S)
    kk_bias = (key.norm(dim=-1, keepdim=True).expand(-1, -1, num_heads) *
               kk_bias_r).view(-1, 1, bsz * num_heads).transpose(0, 2)

    q = q * scaling

    if attn_mask is not None:
        assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
            attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or \
            attn_mask.dtype == torch.bool, \
            'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
        if attn_mask.dtype == torch.uint8:
            warnings.warn(
                "Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
            attn_mask = attn_mask.to(torch.bool)

        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 2D attn_mask is not correct.')
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 3D attn_mask is not correct.')
        else:
            raise RuntimeError(
                "attn_mask's dimension {} is not supported".format(attn_mask.dim()))
        # attn_mask's dim is 3 now.

    # convert ByteTensor key_padding_mask to bool
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn(
            "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
        key_padding_mask = key_padding_mask.to(torch.bool)

    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None

    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    if add_zero_attn:
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:],
                                      dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:],
                                      dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))

    k = k.transpose(1, 2)
    kn = k.norm(dim=-2, keepdim=True)  # (N * num_heads, 1, S)
    # QK^T
    attn_output_weights = torch.bmm(q, k) + kn * kk_bias
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_output_weights.masked_fill_(attn_mask, float('-inf'))
        else:
            attn_output_weights += attn_mask

    if key_padding_mask is not None:
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float('-inf'),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)

    attn_output_weights = F.softmax(attn_output_weights, dim=-1)
    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
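# Hypothetical smoke test for biased_multi_head_attention_forward. Shapes follow
# the docstring; the per-head shape assumed for kk_bias_r and the zero-valued
# in_proj_bias rows are illustrative guesses, and the defining module is assumed
# to import torch, warnings, Tensor, F (torch.nn.functional), pad,
# has_torch_function and handle_torch_function at module level.
import torch

L = S = 5
N, E, H = 3, 8, 2
query = torch.randn(L, N, E)
key = torch.randn(S, N, E)
value = torch.randn(S, N, E)
q_w, k_w, v_w = (torch.randn(E, E) for _ in range(3))
in_proj_bias = torch.zeros(3, E)        # indexed as [0], [1], [2] for q, k, v
out_w, out_b = torch.randn(E, E), torch.zeros(E)
kk_bias_r = torch.zeros(H)              # assumed per-head scale on the key-norm bias

attn_output, attn_weights = biased_multi_head_attention_forward(
    query, key, value, E, H,
    None, in_proj_bias, None, None, kk_bias_r, False, 0.0, out_w, out_b,
    training=False, use_separate_proj_weight=True,
    q_proj_weight=q_w, k_proj_weight=k_w, v_proj_weight=v_w)
assert attn_output.shape == (L, N, E)
assert attn_weights.shape == (N, L, S)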