Example #1
    def __init__(self, ntoken, ninp, dropout, name="", cuda=False):
        super(_netW, self).__init__()
        # Word embedding table (index 0 is the padding token) plus an output
        # projection that shares its weight with the embedding (weight tying),
        # placed on GPU or CPU depending on the `cuda` flag.
        if cuda:
            self.word_embed = nn.Embedding(ntoken, ninp, padding_idx=0).cuda()
            self.Linear = share_Linear(self.word_embed.weight).cuda()
        else:
            self.word_embed = nn.Embedding(ntoken, ninp, padding_idx=0).cpu()
            self.Linear = share_Linear(self.word_embed.weight).cpu()

        self.init_weights()
        self.d = dropout
        self.name = name
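`share_Linear` is not defined in any of these excerpts. Judging from how it is called with the embedding weight of shape (ntoken, ninp), it appears to be a weight-tying layer that projects ninp-dimensional hidden states back to vocabulary logits. A minimal sketch under that assumption; the class below is a reconstruction, not the repository's actual implementation:

import torch.nn as nn
import torch.nn.functional as F

class share_Linear(nn.Module):
    # Hypothetical reconstruction: a linear layer that reuses an externally
    # owned weight instead of allocating its own (weight tying).
    def __init__(self, weight):
        super(share_Linear, self).__init__()
        self.weight = weight  # shared nn.Parameter of shape (ntoken, ninp)

    def forward(self, x):
        # x: (..., ninp) -> vocabulary logits (..., ntoken)
        return F.linear(x, self.weight)

Because the weight object is shared rather than copied, gradients from both the embedding lookup and the output projection update the same matrix.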
Example #2
    def __init__(self, ntoken, ninp, dropout, pretrained_wemb):
        super(_netW, self).__init__()
        # Embedding table with one extra row (index 0 is the padding token),
        # initialised from pretrained word vectors rather than init_weights().
        self.word_embed = nn.Embedding(ntoken + 1, ninp, padding_idx=0).cuda()
        self.word_embed.weight.data.copy_(torch.from_numpy(pretrained_wemb))
        # Output projection tied to the embedding weight.
        self.Linear = share_Linear(self.word_embed.weight).cuda()
        self.d = dropout
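Example #2 replaces the default random initialisation with pretrained word vectors by copying a NumPy array into the embedding weight. A self-contained illustration of that copy pattern; the sizes and the random array are placeholders standing in for real pretrained vectors:

import numpy as np
import torch
import torch.nn as nn

ntoken, ninp = 1000, 300  # placeholder vocabulary and embedding sizes
pretrained_wemb = np.random.rand(ntoken + 1, ninp).astype(np.float32)  # stand-in for real pretrained vectors

word_embed = nn.Embedding(ntoken + 1, ninp, padding_idx=0)
word_embed.weight.data.copy_(torch.from_numpy(pretrained_wemb))  # overwrite the default init in place

The copy must match the weight shape exactly, which is why the array has ntoken + 1 rows here.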
Example #3
    def __init__(self, ntoken, ninp, dropout):
        super(_netW, self).__init__()
        # Embedding table with one extra row (ntoken + 1 entries) and a
        # weight-tied output projection, both on GPU.
        self.word_embed = nn.Embedding(ntoken + 1, ninp).cuda()
        self.Linear = share_Linear(self.word_embed.weight).cuda()
        self.init_weights()
        self.d = dropout
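For completeness, a hedged usage sketch, assuming `_netW` is defined as in Example #1 and `share_Linear` behaves as sketched above; all sizes and the name argument are made up for illustration:

import torch

net = _netW(ntoken=1000, ninp=300, dropout=0.5, name="wordnet", cuda=False)
tokens = torch.randint(1, 1000, (4, 16))   # a batch of 4 sequences of 16 token indices (0 reserved for padding)
emb = net.word_embed(tokens)               # (4, 16, 300) word embeddings
logits = net.Linear(emb)                   # (4, 16, 1000) vocabulary scores via the tied weight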