Example 1: TreeDecoder.__init__
    def __init__(self, opt, dicts):
        self.layers = opt.layers
        self.input_feed = opt.input_feed
        input_size = opt.word_vec_size
        # Input feeding: the previous step's attention output (size rnn_size)
        # is concatenated to the word embedding, so the RNN input grows.
        if self.input_feed:
            input_size += opt.rnn_size

        super(TreeDecoder, self).__init__()
        self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
        self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
        if opt.has_attn:
            self.attn = lib.GlobalAttention(opt.rnn_size)
        self.dropout = nn.Dropout(opt.dropout)
        self.hidden_size = opt.rnn_size
        self.opt = opt
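
Example 1 builds a standard attentional decoder: a learned nn.Embedding lookup table, a StackedLSTM, optional GlobalAttention, and dropout. With input feeding enabled, the previous attention output is fed back as part of the next RNN input, which is why input_size is increased by rnn_size. The following is a minimal, self-contained sketch (not the project's code; it uses a plain nn.LSTMCell instead of StackedLSTM, and all sizes are illustrative) of that dimension bookkeeping.

    # Hypothetical sketch of input feeding with a single LSTM cell.
    import torch
    import torch.nn as nn

    word_vec_size, rnn_size, batch = 8, 16, 4
    emb = nn.Embedding(100, word_vec_size, padding_idx=0)
    # Because of input feeding, the cell input is word_vec_size + rnn_size.
    cell = nn.LSTMCell(word_vec_size + rnn_size, rnn_size)

    tokens = torch.randint(1, 100, (batch,))
    prev_context = torch.zeros(batch, rnn_size)   # attention output of step t-1
    state = (torch.zeros(batch, rnn_size), torch.zeros(batch, rnn_size))

    rnn_input = torch.cat([emb(tokens), prev_context], dim=1)
    state = cell(rnn_input, state)
    print(state[0].shape)  # torch.Size([4, 16])
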
Example 2: TreeDecoder_W2V.__init__ (pre-trained Word2Vec embeddings)
    def __init__(self, opt, dicts):
        self.layers = opt.layers
        self.input_feed = opt.input_feed
        input_size = opt.word_vec_size
        if self.input_feed:
            input_size += opt.rnn_size

        super(TreeDecoder_W2V, self).__init__()
        # The learned embedding table is replaced by a pre-trained Word2Vec model.
        # self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
        self.embeddings = gensim.models.Word2Vec.load(opt.embedding_w2v + 'processed_all.train_xe.comment.gz')

        self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
        if opt.has_attn:
            self.attn = lib.GlobalAttention(opt.rnn_size)
        self.dropout = nn.Dropout(opt.dropout)
        self.hidden_size = opt.rnn_size
        self.opt = opt
        self.dicts = dicts
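
Example 2 swaps the learned embedding table for a gensim Word2Vec model loaded from disk and keeps dicts around so token indices can be mapped to vectors later in the forward pass. A minimal sketch of that pattern follows; it is not the project's code. The toy corpus, the stand-in vocabulary, and PAD index 0 are assumptions, and gensim 4.x is assumed (vector_size keyword, w2v.wv lookups).

    # Hypothetical sketch: copy pre-trained Word2Vec vectors into a frozen
    # nn.Embedding aligned with a decoder-side vocabulary.
    import torch
    import torch.nn as nn
    from gensim.models import Word2Vec

    corpus = [["returns", "the", "sum"], ["opens", "the", "file"]]
    w2v = Word2Vec(sentences=corpus, vector_size=8, min_count=1)  # or Word2Vec.load(path)

    vocab = ["<pad>", "returns", "the", "sum", "opens", "file"]   # stand-in for dicts
    weights = torch.zeros(len(vocab), w2v.vector_size)
    for idx, word in enumerate(vocab):
        if word in w2v.wv:                        # unknown words (and PAD) stay zero
            weights[idx] = torch.from_numpy(w2v.wv[word].copy())

    word_lut = nn.Embedding.from_pretrained(weights, freeze=True, padding_idx=0)
    print(word_lut(torch.tensor([1, 2])).shape)   # torch.Size([2, 8])
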