Example #1
File: EKT.py Project: Badstu/CAKT
    def forward(self, v, kn, ko, s, hidden):
        if hidden is None:
            h = self.h_initial.view(self.num_layers, self.know_length,
                                    self.seq_hidden_size)
            attn_h = self.h_initial
            length = Variable(torch.FloatTensor([0.]))
            beta = None

        else:

            h, vs, hs = hidden

            # calculate beta attention weights over the stored exercise embeddings (dot product)
            beta = torch.mm(vs, v.view(-1, 1)).view(-1)
            beta, idx = beta.topk(min(len(beta), self.k), sorted=False)
            beta = nn.functional.softmax(beta.view(1, -1), dim=-1)
            length = Variable(torch.FloatTensor([beta.size()[1]]))

            hs = hs.view(-1, self.know_length * self.seq_hidden_size)
            attn_h = torch.mm(beta, torch.index_select(hs, 0, idx)).view(-1)

        # calculate alpha weights over knowledge concepts using dot product
        alpha = torch.mm(self.knowledge_memory, kn.view(-1, 1)).view(-1)
        alpha = nn.functional.softmax(alpha.view(1, -1), dim=-1)

        # attention-weighted read of the per-concept hidden state
        hkp = torch.mm(alpha,
                       attn_h.view(self.know_length,
                                   self.seq_hidden_size)).view(-1)
        pred_v = torch.cat([v, hkp]).view(1, -1)
        predict_score = self.score_layer(pred_v)

        # seq states update
        if self.score_mode == 'concat':
            x = v
        else:
            x = torch.cat([
                v * (s >= 0.5).type_as(v).expand_as(v),
                v * (s < 0.5).type_as(v).expand_as(v)
            ])
        x = torch.cat([x, s])

        # broadcast the interaction vector to every knowledge concept and
        # weight each copy by its attention coefficient
        xk = x.view(1, -1).expand(self.know_length, -1)
        xk = alpha.view(-1, 1) * xk
        # xk = ko.float().view(-1, 1) * xk  # alternative weighting by ko

        _, h = self.rnn(xk.unsqueeze(0), h)
        return predict_score.view(1), h, beta
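The `beta` branch above retrieves past hidden states by attention. Below is a minimal standalone sketch of that step with made-up sizes (seq_len, topic_size, know_length, seq_hidden_size, and k are illustrative, not the project's values): dot-product similarity against the stored exercise embeddings, top-k selection, softmax, and a weighted read of the corresponding hidden states.

    import torch
    import torch.nn.functional as F

    # Illustrative sizes only; the real model defines its own.
    seq_len, topic_size, know_length, seq_hidden_size, k = 10, 8, 5, 16, 3

    vs = torch.randn(seq_len, topic_size)                    # stored exercise embeddings
    hs = torch.randn(seq_len, know_length, seq_hidden_size)  # stored hidden states
    v = torch.randn(topic_size)                              # current exercise embedding

    # dot-product similarity, keep only the k most similar past exercises
    beta = torch.mm(vs, v.view(-1, 1)).view(-1)
    beta, idx = beta.topk(min(len(beta), k), sorted=False)
    beta = F.softmax(beta.view(1, -1), dim=-1)

    # attention-weighted combination of the selected (flattened) hidden states
    hs_flat = hs.view(-1, know_length * seq_hidden_size)
    attn_h = torch.mm(beta, torch.index_select(hs_flat, 0, idx)).view(-1)
    print(attn_h.shape)  # torch.Size([80]) == know_length * seq_hidden_size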
Example #2
File: EKT.py Project: Badstu/CAKT
    def forward(self, v, s, hidden):
        if hidden is None:
            h = self.initial_h.view(self.num_layers, 1, self.seq_hidden_size)
            attn_h = self.initial_h
            length = Variable(torch.FloatTensor([0.]))
        else:
            h, vs, hs = hidden
            # calculate alpha attention weights over the stored exercise
            # embeddings using dot product, keeping only the top k
            alpha = torch.mm(vs, v.view(-1, 1)).view(-1)
            alpha, idx = alpha.topk(min(len(alpha), self.k), sorted=False)
            alpha = nn.functional.softmax(alpha.view(1, -1), dim=-1)

            length = Variable(torch.FloatTensor([alpha.size()[1]]))

            # flatten each h
            hs = hs.view(-1, self.num_layers * self.seq_hidden_size)
            attn_h = torch.mm(alpha, torch.index_select(hs, 0, idx)).view(-1)

        if self.with_last:
            pred_v = torch.cat([v, attn_h, h.view(-1), length]).view(1, -1)
        else:
            pred_v = torch.cat([v, attn_h]).view(1, -1)
        score = self.score(pred_v)

        if self.score_mode == 'concat':
            x = v
        else:
            x = torch.cat([
                v * (s >= 0.5).type_as(v).expand_as(v),
                v * (s < 0.5).type_as(v).expand_as(v)
            ])
        x = torch.cat([x, s])

        _, h = self.rnn(x.view(1, 1, -1), h)
        return score, h
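All three forward passes build the RNN input the same way when score_mode is not 'concat'. Below is a small self-contained sketch of that encoding, with a hypothetical topic_size and s assumed to be a 1-element tensor (as the comparison against 0.5 suggests).

    import torch

    topic_size = 8
    v = torch.randn(topic_size)  # exercise embedding
    s = torch.tensor([1.0])      # response: 1.0 for correct, 0.0 for incorrect

    # route v into the "correct" or "incorrect" half, then append the score
    x = torch.cat([
        v * (s >= 0.5).type_as(v).expand_as(v),
        v * (s < 0.5).type_as(v).expand_as(v),
    ])
    x = torch.cat([x, s])
    print(x.shape)  # torch.Size([17]) == 2 * topic_size + 1, matching the GRU input size in Example #4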
Example #3
File: EKT.py Project: Badstu/CAKT
    def forward(self, v, kn, ko, s, h, beta=None):
        if h is None:
            h = self.h_initial.view(self.num_layers, self.know_length,
                                    self.seq_hidden_size)
            length = Variable(torch.FloatTensor([0.]))

        # calculate beta weights over knowledge concepts using dot product,
        # unless they were supplied by the caller
        if beta is None:
            alpha = torch.mm(self.knowledge_memory, kn.view(-1, 1)).view(-1)
            beta = nn.functional.softmax(alpha.view(1, -1), dim=-1)

        # predict score at time t from the attention-weighted hidden state
        hkp = torch.mm(beta, h.view(self.know_length,
                                    self.seq_hidden_size)).view(-1)
        pred_v = torch.cat([v, hkp]).view(1, -1)
        predict_score = self.score_layer(pred_v)

        # seq states update
        if self.score_mode == 'concat':
            x = v
        else:
            x = torch.cat([
                v * (s >= 0.5).type_as(v).expand_as(v),
                v * (s < 0.5).type_as(v).expand_as(v)
            ])
        x = torch.cat([x, s])

        # broadcast the interaction vector to every knowledge concept and
        # weight each copy by its attention coefficient
        xk = x.view(1, -1).expand(self.know_length, -1)
        xk = beta.view(-1, 1) * xk
        # xk = ko.float().view(-1, 1) * xk  # alternative weighting by ko

        _, h = self.rnn(xk.unsqueeze(0), h)
        return predict_score.view(1), h
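The prediction path in this variant attends over knowledge concepts rather than over past time steps. A minimal sketch with made-up sizes; knowledge_memory, kn, and H here stand in for self.knowledge_memory, the exercise's knowledge embedding, and the per-concept hidden state.

    import torch
    import torch.nn.functional as F

    # Illustrative sizes only.
    know_length, kn_size, seq_hidden_size = 5, 4, 16

    knowledge_memory = torch.randn(know_length, kn_size)  # one key per knowledge concept
    kn = torch.randn(kn_size)                             # knowledge embedding of the current exercise
    H = torch.randn(know_length, seq_hidden_size)         # per-concept hidden states

    # dot-product attention over concepts, then a weighted read of the hidden states
    alpha = torch.mm(knowledge_memory, kn.view(-1, 1)).view(-1)
    beta = F.softmax(alpha.view(1, -1), dim=-1)  # shape (1, know_length)
    hkp = torch.mm(beta, H).view(-1)             # shape (seq_hidden_size,)
    print(hkp.shape)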
Example #4
File: EKT.py Project: Badstu/CAKT
    def __init__(self,
                 topic_size,
                 seq_hidden_size,
                 k,
                 score_mode,
                 num_layers=1):
        super(AttnSeqTimeDecayModel, self).__init__()
        self.topic_size = topic_size
        self.seq_hidden_size = seq_hidden_size
        self.num_layers = num_layers
        self.score_mode = score_mode
        # GRU input size depends on how the interaction is encoded:
        # 'concat' feeds (v, s); otherwise the correct/incorrect split of v plus s
        if self.score_mode == 'concat':
            self.rnn = nn.GRU(topic_size + 1, seq_hidden_size, num_layers)
        else:
            self.rnn = nn.GRU(topic_size * 2 + 1, seq_hidden_size, num_layers)
        self.score = nn.Linear(topic_size + seq_hidden_size, 1)
        self.k = k
        # initial hidden state with requires_grad=True, stored flat and reshaped in forward
        self.initial_h = Variable(torch.zeros(self.num_layers *
                                              self.seq_hidden_size),
                                  requires_grad=True)
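A short sketch of how the two score_mode branches above fix the GRU input size, using hypothetical hyperparameters rather than the project's defaults; rnn_concat is built only for the size comparison.

    import torch
    import torch.nn as nn

    topic_size, seq_hidden_size, num_layers = 8, 16, 1

    # 'concat' feeds (v, s); the other mode feeds the correct/incorrect split of v plus s
    rnn_concat = nn.GRU(topic_size + 1, seq_hidden_size, num_layers)
    rnn_split = nn.GRU(topic_size * 2 + 1, seq_hidden_size, num_layers)

    x = torch.randn(2 * topic_size + 1)               # input built as in the forward passes above
    h0 = torch.zeros(num_layers, 1, seq_hidden_size)  # (layers, batch, hidden)
    _, h = rnn_split(x.view(1, 1, -1), h0)
    print(h.shape)  # torch.Size([1, 1, 16])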
Example #5
File: EKT.py Project: Badstu/CAKT
    def default_hidden(self):
        return Variable(torch.zeros(self.num_layers, 1, self.seq_hidden_size))
Example #6
File: EKT.py Project: Badstu/CAKT
    def __init__(self, topic_size, k):
        super(AttnModel, self).__init__()
        self.user_emb_size = topic_size
        self.k = k
        self.initial_guess = Variable(torch.zeros(1), requires_grad=True)
Example #7
File: EKT.py Project: Badstu/CAKT
    def default_hidden(self):
        return Variable(torch.zeros(1, 1, self.topic_size))
Example #8
File: EKT.py Project: Badstu/CAKT
    def default_hidden(self, batch_size):
        # the second hidden block exists only when there is more than one layer
        return (Variable(torch.zeros(2, batch_size, self.emb_size)),
                Variable(torch.zeros(self.num_layers - 1,
                                     batch_size, self.emb_size))
                if self.num_layers > 1 else None)