def __init__(self, embdim, hdim, numlayers:int=1, dropout=0., zdim=None,
             sentence_encoder:SequenceEncoder=None, query_encoder:SequenceEncoder=None,
             feedatt=False, store_attn=True, minkl=0.05, **kw):
    super(BasicGenModel, self).__init__(**kw)
    self.minkl = minkl
    self.embdim, self.hdim, self.numlayers, self.dropout = embdim, hdim, numlayers, dropout
    self.zdim = embdim if zdim is None else zdim

    # input-side token embedding with rare-token replacement
    inpemb = torch.nn.Embedding(sentence_encoder.vocab.number_of_ids(), embdim, padding_idx=0)
    inpemb = TokenEmb(inpemb, rare_token_ids=sentence_encoder.vocab.rare_ids, rare_id=1)
    # _, covered_word_ids = load_pretrained_embeddings(inpemb.emb, sentence_encoder.vocab.D,
    #     p="../../data/glove/glove300uncased")    # load glove embeddings where possible into the inner embedding class
    # inpemb._do_rare(inpemb.rare_token_ids - covered_word_ids)
    self.inp_emb = inpemb

    # bidirectional LSTM encoder over the input sentence
    encoder_dim = hdim
    encoder = LSTMEncoder(embdim, hdim // 2, num_layers=numlayers, dropout=dropout, bidirectional=True)
    # encoder = q.LSTMEncoder(embdim, *([encoder_dim // 2] * numlayers), bidir=True, dropout_in=dropout)
    self.inp_enc = encoder

    # decoder: input is previous token embedding + latent z (+ attention summary if feedatt)
    self.out_emb = torch.nn.Embedding(query_encoder.vocab.number_of_ids(), embdim, padding_idx=0)
    dec_rnn_in_dim = embdim + self.zdim + (encoder_dim if feedatt else 0)
    decoder_rnn = LSTMTransition(dec_rnn_in_dim, hdim, numlayers, dropout=dropout)
    self.out_rnn = decoder_rnn

    # separate embedding and encoder for the target sequence, used to parameterize the latent posterior
    self.out_emb_vae = torch.nn.Embedding(query_encoder.vocab.number_of_ids(), embdim, padding_idx=0)
    self.out_enc = LSTMEncoder(embdim, hdim // 2, num_layers=numlayers, dropout=dropout, bidirectional=True)
    # self.out_mu = torch.nn.Sequential(torch.nn.Linear(embdim, hdim), torch.nn.Tanh(), torch.nn.Linear(hdim, self.zdim))
    # self.out_logvar = torch.nn.Sequential(torch.nn.Linear(embdim, hdim), torch.nn.Tanh(), torch.nn.Linear(hdim, self.zdim))
    self.out_mu = torch.nn.Sequential(torch.nn.Linear(hdim, self.zdim))
    self.out_logvar = torch.nn.Sequential(torch.nn.Linear(hdim, self.zdim))

    # output layer over the target vocabulary (copy mechanism disabled, see self.nocopy below)
    decoder_out = BasicGenOutput(hdim + encoder_dim, vocab=query_encoder.vocab)
    # decoder_out.build_copy_maps(inp_vocab=sentence_encoder.vocab)
    self.out_lin = decoder_out

    self.att = q.Attention(q.SimpleFwdAttComp(hdim, encoder_dim, hdim), dropout=min(0.1, dropout))

    # per-layer mappings from encoder final states to decoder initial states
    self.enc_to_dec = torch.nn.ModuleList([torch.nn.Sequential(
        torch.nn.Linear(encoder_dim, hdim),
        torch.nn.Tanh()
    ) for _ in range(numlayers)])

    self.feedatt = feedatt
    self.nocopy = True
    self.store_attn = store_attn

    self.reset_parameters()
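# Illustrative sketch (not part of the original class): the out_mu / out_logvar heads and the
# `minkl` floor above suggest the usual reparameterization trick with a free-bits-style clamp
# on the per-dimension KL. The standalone helper below is an assumption about how these pieces
# fit together; the name `sample_latent_sketch` and the `enc_summary` argument are hypothetical.
def sample_latent_sketch(model, enc_summary):
    # enc_summary: (batsize, hdim) summary state of the target sequence from model.out_enc
    mu = model.out_mu(enc_summary)                       # (batsize, zdim)
    logvar = model.out_logvar(enc_summary)               # (batsize, zdim)
    std = torch.exp(0.5 * logvar)
    z = mu + std * torch.randn_like(std)                 # reparameterization trick
    kl = -0.5 * (1 + logvar - mu ** 2 - logvar.exp())    # per-dimension KL against N(0, I)
    kl = torch.clamp(kl, min=model.minkl).sum(-1)        # floor each dimension at minkl, then sum
    return z, kl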
def __init__(self, embdim, hdim, numlayers: int = 1, dropout=0.,
             sentence_encoder: SequenceEncoder = None, query_encoder: SequenceEncoder = None, **kw):
    super(RankModel, self).__init__(**kw)

    inpemb = torch.nn.Embedding(sentence_encoder.vocab.number_of_ids(), embdim, padding_idx=0)
    inpemb = TokenEmb(inpemb, rare_token_ids=sentence_encoder.vocab.rare_ids, rare_id=1)
    # _, covered_word_ids = load_pretrained_embeddings(inpemb.emb, sentence_encoder.vocab.D,
    #     p="../../data/glove/glove300uncased")    # load glove embeddings where possible into the inner embedding class
    # inpemb._do_rare(inpemb.rare_token_ids - covered_word_ids)
    self.inp_emb = inpemb

    encoder = LSTMEncoder(embdim, hdim // 2, num_layers=numlayers, dropout=dropout, bidirectional=True)
    self.inp_enc = encoder

    decoder_emb = torch.nn.Embedding(query_encoder.vocab.number_of_ids(), embdim, padding_idx=0)
    self.out_emb = decoder_emb
    encoder = LSTMEncoder(embdim, hdim // 2, num_layers=numlayers, dropout=dropout, bidirectional=True)
    self.out_enc = encoder

    self.lin_map = torch.nn.Sequential(torch.nn.Linear(hdim, hdim), torch.nn.Tanh())
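# Illustrative sketch (assumption): RankModel's forward pass is not shown in this excerpt.
# A typical use of the modules above is to encode both sequences, map the input summary through
# self.lin_map, and score the pair by a dot product. The helper below is hypothetical.
def score_pair_sketch(model, inp_summary, out_summary):
    # inp_summary / out_summary: (batsize, hdim) summary states from model.inp_enc / model.out_enc
    return (model.lin_map(inp_summary) * out_summary).sum(-1)   # (batsize,) similarity scores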
def create_model(embdim=100, hdim=100, dropout=0., numlayers: int = 1,
                 sentence_encoder: SequenceEncoder = None, query_encoder: SequenceEncoder = None,
                 feedatt=False, nocopy=False):
    # input-side token embedding with rare-token replacement
    inpemb = torch.nn.Embedding(sentence_encoder.vocab.number_of_ids(), embdim, padding_idx=0)
    inpemb = TokenEmb(inpemb, rare_token_ids=sentence_encoder.vocab.rare_ids, rare_id=1)

    # bidirectional LSTM encoder over the input sentence
    encoder_dim = hdim
    encoder = LSTMEncoder(embdim, hdim // 2, numlayers, bidirectional=True, dropout=dropout)
    # encoder = PytorchSeq2SeqWrapper(
    #     torch.nn.LSTM(embdim, hdim, num_layers=numlayers, bidirectional=True, batch_first=True,
    #                   dropout=dropout))

    # decoder embedding and LSTM transition; the attention summary is fed to the decoder input if feedatt
    decoder_emb = torch.nn.Embedding(query_encoder.vocab.number_of_ids(), embdim, padding_idx=0)
    decoder_emb = TokenEmb(decoder_emb, rare_token_ids=query_encoder.vocab.rare_ids, rare_id=1)
    dec_rnn_in_dim = embdim + (encoder_dim if feedatt else 0)
    decoder_rnn = LSTMTransition(dec_rnn_in_dim, hdim, dropout=dropout)

    # pointer-generator output layer with copy maps built from the input vocabulary
    # decoder_out = BasicGenOutput(hdim + encoder_dim, query_encoder.vocab)
    decoder_out = PtrGenOutput(hdim + encoder_dim, out_vocab=query_encoder.vocab)
    decoder_out.build_copy_maps(inp_vocab=sentence_encoder.vocab)

    # note: min(0.0, dropout) effectively disables attention dropout for non-negative dropout values
    attention = q.Attention(q.SimpleFwdAttComp(hdim, encoder_dim, hdim), dropout=min(0.0, dropout))
    # attention = q.Attention(q.DotAttComp(), dropout=min(0.0, dropout))

    # per-layer mappings from encoder final states to decoder initial states
    enctodec = torch.nn.ModuleList([
        torch.nn.Sequential(torch.nn.Linear(encoder_dim, hdim), torch.nn.Tanh())
        for _ in range(numlayers)
    ])

    model = BasicGenModel(inpemb, encoder, decoder_emb, decoder_rnn, decoder_out, attention,
                          enc_to_dec=enctodec, feedatt=feedatt, nocopy=nocopy)
    return model
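# Usage sketch (assumption): create_model expects already-built SequenceEncoder objects whose
# .vocab exposes number_of_ids() and rare_ids, as used above. The encoder variables below are
# placeholders; constructing them depends on the surrounding data pipeline, which is not shown here.
# sentence_enc, query_enc = ...   # SequenceEncoder instances from the data loading code
# model = create_model(embdim=100, hdim=200, dropout=0.1, numlayers=2,
#                      sentence_encoder=sentence_enc, query_encoder=query_enc,
#                      feedatt=True, nocopy=False)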
def test_grad_time_states(self):
    enc = LSTMEncoder(10, 10, 2)
    # batch of 3 sequences of length 6; the mask marks the valid (unpadded) timesteps
    x = torch.nn.Parameter(torch.randn(3, 6, 10))
    mask = torch.tensor([
        [1, 1, 1, 0, 0, 0],
        [1, 1, 1, 1, 1, 0],
        [1, 1, 1, 1, 0, 0],
    ])
    y, h = enc(x, mask)
    print(y.size())
    print(len(h))
    print(len(h[0]))
    print(h[0][0].size())
    # backprop through the outputs of the third example only
    y[2].sum().backward()
    # inspect input gradients: only example 2, and only its unmasked timesteps, should receive gradient
    print(x.grad[:, :, :2])
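    # Possible strengthening of the check above (an assumption, not part of the original test):
    # since backward() was called on y[2] only, gradient should reach the third example alone,
    # and only at its unmasked timesteps if LSTMEncoder handles the mask correctly, e.g.:
    # assert torch.all(x.grad[:2] == 0)       # examples 0 and 1 receive no gradient
    # assert torch.all(x.grad[2, 4:] == 0)    # padded timesteps of example 2 receive no gradient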