def __init__(self, embedding_size, hidden_size, num_layers, num_heads,
             total_key_depth, total_value_depth, filter_size,
             max_length=3000, input_dropout=0.0, layer_dropout=0.0,
             attention_dropout=0.0, relu_dropout=0.0, use_mask=False,
             universal=False, concept=False):
    """
    Parameters:
        embedding_size: Size of embeddings
        hidden_size: Hidden size
        num_layers: Total layers in the Encoder
        num_heads: Number of attention heads
        total_key_depth: Size of last dimension of keys. Must be divisible by num_heads
        total_value_depth: Size of last dimension of values. Must be divisible by num_heads
        filter_size: Hidden size of the middle layer in the FFN
        max_length: Max sequence length (required for timing signal)
        input_dropout: Dropout just after embedding
        layer_dropout: Dropout for each layer
        attention_dropout: Dropout probability after attention (should be non-zero only during training)
        relu_dropout: Dropout probability after ReLU in the FFN (should be non-zero only during training)
        use_mask: Set to True to turn on future-value masking
        universal: Set to True to share one EncoderLayer across all depth steps (Universal Transformer)
        concept: Reserved flag; not used in this constructor
    """
    super(Encoder, self).__init__()

    self.universal = universal
    self.num_layers = num_layers
    self.timing_signal = _gen_timing_signal(max_length, hidden_size)

    if self.universal:
        # Per-depth ("time step") signal for the Universal Transformer.
        self.position_signal = _gen_timing_signal(num_layers, hidden_size)

    params = (hidden_size,
              total_key_depth or hidden_size,
              total_value_depth or hidden_size,
              filter_size,
              num_heads,
              _gen_bias_mask(max_length) if use_mask else None,
              layer_dropout,
              attention_dropout,
              relu_dropout)

    self.embedding_proj = nn.Linear(embedding_size, hidden_size, bias=False)

    if self.universal:
        self.enc = EncoderLayer(*params)
    else:
        self.enc = nn.ModuleList([EncoderLayer(*params) for _ in range(num_layers)])

    self.layer_norm = LayerNorm(hidden_size)
    self.input_dropout = nn.Dropout(input_dropout)
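# A minimal usage sketch, assuming this __init__ belongs to the Encoder class
# referenced in super(); the hyperparameter values are illustrative only and
# would normally come from the project's config.
import torch

enc = Encoder(embedding_size=300, hidden_size=300, num_layers=2, num_heads=2,
              total_key_depth=40, total_value_depth=40, filter_size=50,
              use_mask=False, universal=False)
x = torch.randn(8, 120, 300)   # (batch, seq_len, embedding_size)
out = enc(x)                   # forward() is defined elsewhere in the class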
def __init__(self, embedding_size, hidden_size, num_layers, num_heads,
             total_key_depth, total_value_depth, filter_size,
             max_length=config.max_enc_steps, input_dropout=0.0,
             layer_dropout=0.0, attention_dropout=0.0, relu_dropout=0.0):
    """
    Parameters:
        embedding_size: Size of embeddings
        hidden_size: Hidden size
        num_layers: Total layers in the Decoder
        num_heads: Number of attention heads
        total_key_depth: Size of last dimension of keys. Must be divisible by num_heads
        total_value_depth: Size of last dimension of values. Must be divisible by num_heads
        filter_size: Hidden size of the middle layer in the FFN
        max_length: Max sequence length (required for timing signal)
        input_dropout: Dropout just after embedding
        layer_dropout: Dropout for each layer
        attention_dropout: Dropout probability after attention (should be non-zero only during training)
        relu_dropout: Dropout probability after ReLU in the FFN (should be non-zero only during training)
    """
    super(Decoder, self).__init__()

    self.num_layers = num_layers
    self.timing_signal = _gen_timing_signal(max_length, hidden_size)
    self.mask = _get_attn_subsequent_mask(max_length)  # mask to hide future positions

    params = (hidden_size,
              total_key_depth or hidden_size,
              total_value_depth or hidden_size,
              filter_size,
              num_heads,
              _gen_bias_mask(max_length),  # mandatory for a decoder
              layer_dropout,
              attention_dropout,
              relu_dropout)

    self.embedding_proj = nn.Linear(embedding_size, hidden_size, bias=False)

    # Each layer consumes a tuple of (decoder inputs, encoder output),
    # which is why the stack can be chained with nn.Sequential.
    self.dec = nn.Sequential(*[DecoderLayer(*params) for _ in range(num_layers)])

    self.layer_norm = LayerNorm(hidden_size)
    self.input_dropout = nn.Dropout(input_dropout)
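# The decoder builds its bias mask unconditionally ("mandatory" above). A
# minimal sketch of what _gen_bias_mask presumably computes, assuming the
# standard additive causal mask: -inf above the diagonal so that, once the
# mask is added to the attention logits, position i cannot attend to j > i.
import numpy as np
import torch

def _gen_bias_mask_sketch(max_length):
    np_mask = np.triu(np.full((max_length, max_length), -np.inf), k=1)
    return torch.from_numpy(np_mask).float().unsqueeze(0).unsqueeze(1)  # (1, 1, L, L)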
def __init__(self, hidden_size, total_key_depth, total_value_depth,
             filter_size, num_heads, bias_mask, layer_dropout=0.0,
             attention_dropout=0.0, relu_dropout=0.0):
    """
    Parameters:
        hidden_size: Hidden size
        total_key_depth: Size of last dimension of keys. Must be divisible by num_heads
        total_value_depth: Size of last dimension of values. Must be divisible by num_heads
        filter_size: Hidden size of the middle layer in the FFN
        num_heads: Number of attention heads
        bias_mask: Masking tensor to prevent connections to future elements
        layer_dropout: Dropout for this layer
        attention_dropout: Dropout probability after attention (should be non-zero only during training)
        relu_dropout: Dropout probability after ReLU in the FFN (should be non-zero only during training)
    """
    super(DecoderLayerContextV, self).__init__()

    # Masked self-attention over the decoder inputs.
    self.multi_head_attention_dec = MultiHeadAttention(
        hidden_size, total_key_depth, total_value_depth, hidden_size,
        num_heads, bias_mask, attention_dropout)
    # Unmasked cross-attention over the encoder outputs.
    self.multi_head_attention_enc_dec = MultiHeadAttention(
        hidden_size, total_key_depth, total_value_depth, hidden_size,
        num_heads, None, attention_dropout)
    self.positionwise_feed_forward = PositionwiseFeedForward(
        hidden_size, filter_size, hidden_size,
        layer_config='cc', padding='left', dropout=relu_dropout)

    self.dropout = nn.Dropout(layer_dropout)
    self.layer_norm_mha_dec = LayerNorm(hidden_size)
    self.layer_norm_mha_enc = LayerNorm(hidden_size)
    self.layer_norm_ffn = LayerNorm(hidden_size)
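# The three LayerNorms suggest the usual pre-norm residual wiring. A
# hypothetical forward pass consistent with the submodules built above;
# the argument layout and the MultiHeadAttention call signature are
# assumptions, not the project's actual forward().
def decoder_layer_forward_sketch(layer, inputs):
    x, encoder_outputs, attention_weight, mask = inputs
    mask_src, mask_trg = mask

    # 1. Masked self-attention over decoder states: pre-norm + residual.
    x_norm = layer.layer_norm_mha_dec(x)
    y, _ = layer.multi_head_attention_dec(x_norm, x_norm, x_norm, mask_trg)
    x = layer.dropout(x + y)

    # 2. Cross-attention over the encoder outputs: pre-norm + residual.
    x_norm = layer.layer_norm_mha_enc(x)
    y, attention_weight = layer.multi_head_attention_enc_dec(
        x_norm, encoder_outputs, encoder_outputs, mask_src)
    x = layer.dropout(x + y)

    # 3. Position-wise feed-forward: pre-norm + residual.
    x_norm = layer.layer_norm_ffn(x)
    y = layer.positionwise_feed_forward(x_norm)
    x = layer.dropout(x + y)

    return x, encoder_outputs, attention_weight, mask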
def __init__(self, embedding_size, hidden_size, num_layers, num_heads,
             total_key_depth, total_value_depth, filter_size,
             max_length=1000, input_dropout=0.0, layer_dropout=0.0,
             attention_dropout=0.0, relu_dropout=0.0, use_mask=False,
             universal=False):
    """
    Parameters follow the same conventions as the Encoder/Decoder above;
    `universal` shares one ComplexEmoAttentionLayer across all depth steps.
    """
    super(ComplexResDecoder, self).__init__()

    self.universal = universal
    self.num_layers = num_layers
    self.timing_signal = _gen_timing_signal(max_length, hidden_size)

    if self.universal:
        # Per-depth ("time step") signal for the Universal Transformer.
        self.position_signal = _gen_timing_signal(num_layers, hidden_size)

    self.mask = _get_attn_subsequent_mask(max_length)

    params = (hidden_size,
              total_key_depth or hidden_size,
              total_value_depth or hidden_size,
              filter_size,
              num_heads,
              _gen_bias_mask(max_length),  # mandatory for a decoder
              layer_dropout,
              attention_dropout,
              relu_dropout)

    if self.universal:
        self.dec = ComplexEmoAttentionLayer(*params)
    else:
        self.dec = nn.Sequential(*[ComplexEmoAttentionLayer(*params) for _ in range(num_layers)])

    self.embedding_proj = nn.Linear(embedding_size, hidden_size, bias=False)
    self.layer_norm = LayerNorm(hidden_size)
    self.input_dropout = nn.Dropout(input_dropout)
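# Every constructor above relies on _gen_timing_signal. A sketch of the
# standard sinusoidal signal from "Attention Is All You Need" that such
# helpers usually implement; the project's actual helper may differ in detail.
import math
import numpy as np
import torch

def _gen_timing_signal_sketch(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(length)
    num_timescales = channels // 2
    log_timescale_increment = (math.log(max_timescale / min_timescale)
                               / max(num_timescales - 1, 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales) * -log_timescale_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    signal = np.pad(signal, [[0, 0], [0, channels % 2]], mode='constant')
    return torch.from_numpy(signal).float().unsqueeze(0)  # (1, length, channels)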
def __init__(self, embedding_size, hidden_size, num_layers, num_heads,
             total_key_depth, total_value_depth, filter_size,
             max_length=512, input_dropout=0.0, layer_dropout=0.0,
             attention_dropout=0.0, relu_dropout=0.0, universal=False,
             multi_input=False, context_size=1, attention_fusion_type='mean'):
    """
    Parameters:
        embedding_size: Size of embeddings
        hidden_size: Hidden size
        num_layers: Total layers in the Decoder
        num_heads: Number of attention heads
        total_key_depth: Size of last dimension of keys. Must be divisible by num_heads
        total_value_depth: Size of last dimension of values. Must be divisible by num_heads
        filter_size: Hidden size of the middle layer in the FFN
        max_length: Max sequence length (required for timing signal)
        input_dropout: Dropout just after embedding
        layer_dropout: Dropout for each layer
        attention_dropout: Dropout probability after attention (should be non-zero only during training)
        relu_dropout: Dropout probability after ReLU in the FFN (should be non-zero only during training)
        universal: Set to True to share one DecoderLayer across all depth steps
        multi_input: Whether to use multiple attention modules in the decoder
        context_size: The number of parallel context inputs
        attention_fusion_type: How the multiple attention outputs are combined (e.g. 'mean')
    """
    super(Decoder, self).__init__()

    self.universal = universal
    self.num_layers = num_layers
    self.timing_signal = _gen_timing_signal(max_length, hidden_size)

    if self.universal:
        # Per-depth ("time step") signal for the Universal Transformer.
        self.position_signal = _gen_timing_signal(num_layers, hidden_size)

    self.mask = _get_attn_subsequent_mask(max_length)

    params = (hidden_size,
              total_key_depth or hidden_size,
              total_value_depth or hidden_size,
              filter_size,
              num_heads,
              _gen_bias_mask(max_length),  # mandatory for a decoder
              layer_dropout,
              attention_dropout,
              relu_dropout,
              multi_input,
              context_size,
              attention_fusion_type)

    self.embedding_proj = nn.Linear(embedding_size, hidden_size, bias=False)

    if self.universal:
        self.dec = DecoderLayer(*params)
    else:
        self.dec = nn.Sequential(*[DecoderLayer(*params) for _ in range(num_layers)])

    self.layer_norm = LayerNorm(hidden_size)
    self.input_dropout = nn.Dropout(input_dropout)
    self.multi_input = multi_input
    self.context_size = context_size
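# A hypothetical instantiation of the multi-input variant; all values are
# illustrative. Passing 0 for the key/value depths exercises the
# `total_key_depth or hidden_size` fallback above, and context_size=3 means
# each DecoderLayer fuses cross-attention over three context streams.
dec = Decoder(embedding_size=300, hidden_size=300, num_layers=4, num_heads=4,
              total_key_depth=0, total_value_depth=0, filter_size=1024,
              multi_input=True, context_size=3, attention_fusion_type='mean')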
def __init__(self, embedding_size, hidden_size, num_layers, num_heads,
             total_key_depth, total_value_depth, filter_size,
             max_length=1000, input_dropout=0.0, layer_dropout=0.0,
             attention_dropout=0.0, relu_dropout=0.0, universal=False):
    """
    Parameters:
        embedding_size: Size of embeddings
        hidden_size: Hidden size
        num_layers: Total layers in the Decoder
        num_heads: Number of attention heads
        total_key_depth: Size of last dimension of keys. Must be divisible by num_heads
        total_value_depth: Size of last dimension of values. Must be divisible by num_heads
        filter_size: Hidden size of the middle layer in the FFN
        max_length: Max sequence length (required for timing signal)
        input_dropout: Dropout just after embedding
        layer_dropout: Dropout for each layer
        attention_dropout: Dropout probability after attention (should be non-zero only during training)
        relu_dropout: Dropout probability after ReLU in the FFN (should be non-zero only during training)
        universal: Set to True to share one DecoderLayer across all depth steps
    """
    super(Decoder, self).__init__()

    self.universal = universal
    self.num_layers = num_layers
    self.timing_signal = _gen_timing_signal(max_length, hidden_size)

    if self.universal:
        # Per-depth ("time step") signal for the Universal Transformer.
        self.position_signal = _gen_timing_signal(num_layers, hidden_size)

    self.mask = _get_attn_subsequent_mask(max_length)

    params = (hidden_size,
              total_key_depth or hidden_size,
              total_value_depth or hidden_size,
              filter_size,
              num_heads,
              _gen_bias_mask(max_length),  # mandatory for a decoder
              layer_dropout,
              attention_dropout,
              relu_dropout)

    if config.aln_feature:
        self.align_proj = nn.Linear(config.emb_dim, config.hidden_dim)
        self.alignment_feature = nn.Embedding(num_embeddings=50,
                                              embedding_dim=config.emb_dim,
                                              padding_idx=config.PAD_idx)

    if self.universal:
        self.dec = DecoderLayer(*params)
    else:
        self.dec = nn.Sequential(*[DecoderLayer(*params) for _ in range(num_layers)])

    self.embedding_proj = nn.Linear(embedding_size, hidden_size, bias=False)
    self.layer_norm = LayerNorm(hidden_size)
    self.input_dropout = nn.Dropout(input_dropout)
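# self.mask comes from _get_attn_subsequent_mask. A sketch of the common
# implementation of this helper (as in OpenNMT-style code): a byte-valued
# upper-triangular matrix marking the future positions to be hidden. The
# project's actual helper may differ in dtype or shape.
import numpy as np
import torch

def _get_attn_subsequent_mask_sketch(size):
    subsequent_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    return torch.from_numpy(subsequent_mask)  # (1, size, size); 1 = masked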