Ejemplo n.º 1
0
    def __init__(self, enc_word_vocab_size, dec_word_vocab_size, act_vocab_size, slot_vocab_size, 
            emb_dim, hid_dim, dropout, num_layers):
        """SLU system: shared BERT encoder + act/slot classifiers + value decoder.

        Args:
            enc_word_vocab_size: size of the encoder (input) word vocabulary.
            dec_word_vocab_size: size of the decoder (output) word vocabulary.
            act_vocab_size: size of the dialogue-act label vocabulary.
            slot_vocab_size: size of the slot label vocabulary.
            emb_dim: embedding dimension; must equal ``hid_dim`` because the
                BERT encoder's hidden size is ``hid_dim``.
            hid_dim: encoder hidden dimension.
            dropout: dropout rate shared by embeddings and predictors.
            num_layers: number of hidden layers in the BERT encoder.

        Raises:
            ValueError: if ``emb_dim`` differs from ``hid_dim``.
        """
        super(SLUSystem, self).__init__()

        # Explicit validation instead of `assert`: asserts are silently
        # stripped under `python -O`, letting a bad config through.
        if emb_dim != hid_dim:
            raise ValueError(
                'emb_dim (%d) must equal hid_dim (%d)' % (emb_dim, hid_dim))

        # Initialization
        self.hid_dim = hid_dim

        # embedding tables (decoder words, acts, slots), padded at Constants.PAD
        self.dec_word_emb = Embedding(dec_word_vocab_size, emb_dim, Constants.PAD, dropout)
        self.act_emb = Embedding(act_vocab_size, emb_dim, Constants.PAD, dropout)
        self.slot_emb = Embedding(slot_vocab_size, emb_dim, Constants.PAD, dropout)

        # shared BERT-style encoder over the input vocabulary
        config = BertConfig(
                enc_word_vocab_size,
                hidden_size=hid_dim,
                num_hidden_layers=num_layers,
                num_attention_heads=10,
                intermediate_size=4 * hid_dim,  # conventional 4x FFN expansion
                hidden_act='gelu',
                max_position_embeddings=50
        )
        self.encoder = BertModel(config)

        # act-slot-value predictors; the slot classifier takes an extra
        # emb_dim on its input — presumably conditioned on an act embedding
        # (see STC's definition to confirm).
        self.act_stc = STC(hid_dim, act_vocab_size, dropout, emb_dim)
        self.slot_stc = STC(hid_dim + emb_dim, slot_vocab_size, dropout, emb_dim)
        self.enc_to_dec = Encoder2Decoder(hid_dim, hid_dim)

        # value decoder over the decoder-word vocabulary (hid_dim hidden size)
        self.value_decoder = LSTMDecoder(self.dec_word_emb, self.act_emb, self.slot_emb,
                dec_word_vocab_size, hid_dim, dropout)
Ejemplo n.º 2
0
    def __init__(self, enc_word_vocab_size, dec_word_vocab_size, act_vocab_size, slot_vocab_size, 
            emb_dim, hid_dim, dropout):
        """SLU system with a shared bidirectional LSTM encoder.

        Word, act and slot embeddings feed an act classifier, a slot
        classifier and an LSTM value decoder on top of the shared encoder.
        """
        super(SLUSystem, self).__init__()

        self.hid_dim = hid_dim

        # Lookup tables (padding index Constants.PAD, with dropout).
        self.word_emb = Embedding(enc_word_vocab_size, emb_dim, Constants.PAD, dropout)
        self.act_emb = Embedding(act_vocab_size, emb_dim, Constants.PAD, dropout)
        self.slot_emb = Embedding(slot_vocab_size, emb_dim, Constants.PAD, dropout)

        # Single-layer LSTM encoder; a bidirectional encoder yields a
        # 2 * hid_dim output once both directions are concatenated.
        self.encoder_bidirectional = True
        self.enc_hid_all_dim = (2 * hid_dim if self.encoder_bidirectional
                                else hid_dim)
        self.encoder = EncoderRNN('LSTM', self.encoder_bidirectional,
                1, hid_dim, self.word_emb, 0.)

        # Per-task predictors over the encoder output; the slot classifier
        # takes an extra emb_dim on its input.
        self.act_stc = STC(self.enc_hid_all_dim, act_vocab_size, dropout, emb_dim)
        self.slot_stc = STC(self.enc_hid_all_dim + emb_dim, slot_vocab_size, dropout, emb_dim)
        self.enc_to_dec = Encoder2Decoder(self.enc_hid_all_dim, hid_dim)

        # Value decoder reusing the same word embedding as the encoder.
        self.value_decoder = LSTMDecoder(self.word_emb, self.act_emb, self.slot_emb,
                dec_word_vocab_size, hid_dim, dropout)
Ejemplo n.º 3
0
    def __init__(self, wcn_emb_type, enc_word_vocab_size, dec_word_vocab_size,
                 act_vocab_size, slot_vocab_size, emb_dim, hid_dim, dropout):
        """SLU system whose encoder embeds word confusion networks (WCNs).

        ``wcn_emb_type`` selects the WCN embedding flavour: one of
        'swsr', 'mwsr', 'savg' or 'cavg'; anything else raises.
        """
        super(SLUSystem, self).__init__()

        self.hid_dim = hid_dim

        # Output-side lookup tables (padding index Constants.PAD).
        self.dec_word_emb = Embedding(dec_word_vocab_size, emb_dim,
                                      Constants.PAD, dropout)
        self.act_emb = Embedding(act_vocab_size, emb_dim, Constants.PAD,
                                 dropout)
        self.slot_emb = Embedding(slot_vocab_size, emb_dim, Constants.PAD,
                                  dropout)

        # WCN embedding selection via a dispatch table rather than an
        # if/elif ladder; all variants share the same constructor signature.
        wcn_embedders = {
            'swsr': SWSREmbedding,
            'mwsr': MWSREmbedding,
            'savg': SAVGEmbedding,
            'cavg': CAVGEmbedding,
        }
        if wcn_emb_type not in wcn_embedders:
            raise Exception('Undefined type of embedding.')
        self.enc_word_emb = wcn_embedders[wcn_emb_type](
            enc_word_vocab_size, emb_dim, Constants.PAD, dropout)

        # Single-layer LSTM encoder; bidirectional output is 2 * hid_dim.
        self.encoder_bidirectional = True
        self.enc_hid_all_dim = (2 * hid_dim if self.encoder_bidirectional
                                else hid_dim)
        self.encoder = EncoderRNN('LSTM', self.encoder_bidirectional, 1,
                                  hid_dim, self.enc_word_emb, 0.)

        # Per-task predictors over the encoder output; the slot classifier
        # takes an extra emb_dim on its input.
        self.act_stc = STC(self.enc_hid_all_dim, act_vocab_size, dropout,
                           emb_dim)
        self.slot_stc = STC(self.enc_hid_all_dim + emb_dim, slot_vocab_size,
                            dropout, emb_dim)
        self.enc_to_dec = Encoder2Decoder(self.enc_hid_all_dim, hid_dim)

        # Value decoder over the decoder-word vocabulary.
        self.value_decoder = LSTMDecoder(self.dec_word_emb, self.act_emb,
                                         self.slot_emb, dec_word_vocab_size,
                                         hid_dim, dropout)
Ejemplo n.º 4
0
    def __init__(self,
                 vocab_size,
                 class_size,
                 emb_dim=100,
                 rnn_type='LSTM',
                 bidirectional=True,
                 hid_dim=128,
                 num_layers=1,
                 dropout=0.5):
        """RNN sequence classifier: embedding -> RNN -> dropout -> linear.

        Maps a token sequence over ``vocab_size`` to ``class_size`` logits.
        """
        super(RNN2One, self).__init__()

        self.rnn_type = rnn_type
        self.num_layers = num_layers
        self.bidirectional = bidirectional

        # A bidirectional RNN concatenates both directions, so the linear
        # layer's input width doubles.
        num_directions = 2 if bidirectional else 1
        self.lin_dim = num_directions * hid_dim

        # Embedding shared with the encoder RNN (padding index Constants.PAD).
        self.emb = Embedding(vocab_size, emb_dim, Constants.PAD, dropout)
        self.rnn = EncoderRNN(rnn_type, bidirectional, num_layers, hid_dim,
                              self.emb, dropout)

        self.dropout = nn.Dropout(dropout)
        self.lin = nn.Linear(self.lin_dim, class_size)

        self.init_params()
Ejemplo n.º 5
0
    def __init__(self, vocab_size, emb_dim, hid_dim, dropout):
        """Pointer-style seq2seq: shared embedding, BiLSTM encoder, LSTM decoder."""
        super(S2SPointer, self).__init__()

        # Bidirectional encoder output width: forward + backward states.
        enc_out_dim = 2 * hid_dim

        # One embedding table shared by encoder and decoder.
        self.word_emb = Embedding(vocab_size, emb_dim, Constants.PAD, dropout)
        # Single-layer bidirectional LSTM with no encoder-side dropout (0.).
        self.encoder = EncoderRNN('LSTM', True, 1, hid_dim, self.word_emb, 0.)
        # Bridge the concatenated encoder state into the decoder's hidden size.
        self.enc_to_dec = Encoder2Decoder(enc_out_dim, enc_out_dim)
        self.decoder = LSTMDecoder(self.word_emb, vocab_size, enc_out_dim, dropout)
Ejemplo n.º 6
0
    def __init__(self, enc_word_vocab_size, dec_word_vocab_size, emb_dim,
                 hid_dim, dropout):
        """Encoder-decoder model with separate source/target vocabularies."""
        super(DAModel, self).__init__()

        self.hid_dim = hid_dim

        # Distinct embedding tables for the encoder and decoder sides.
        self.enc_word_emb = Embedding(enc_word_vocab_size, emb_dim,
                                      Constants.PAD, dropout)
        self.dec_word_emb = Embedding(dec_word_vocab_size, emb_dim,
                                      Constants.PAD, dropout)

        # Bidirectional encoder: its output is 2 * hid_dim wide, and the
        # decoder runs at that doubled hidden size.
        enc_out_dim = 2 * hid_dim
        self.encoder = EncoderRNN('LSTM', True, 1, hid_dim, self.enc_word_emb,
                                  0.)
        self.enc_to_dec = Encoder2Decoder(enc_out_dim, enc_out_dim)
        self.decoder = LSTMDecoder(self.dec_word_emb, dec_word_vocab_size,
                                   enc_out_dim, dropout)