Example #1
    # Requires: torch, from copy import deepcopy, from time import time,
    # plus the project-local evaluation module.
    def evaluate_test(self, train_data, val_negative_dict, val_negative, sup_dict):
        # Snapshot the global weights so every user starts adaptation from the
        # same meta-learned state.
        keep_weight = deepcopy(self.state_dict())
        hr_list, ndcg_list = [], []
        idxs = range(train_data.idx)
        ts = []  # per-step adaptation timings (collected but never returned)
        for idx in idxs:
            positive_x, supp_x, supp_y, sup_mdsk, positive_mdsk, _, _, _, len_pos = train_data.get_batch(
                [idx])
            u = train_data.support_fill[idx]['user']
            # Masked sum over the user's positive items: the padding mask is
            # expanded across the 32-dim embedding axis, then the result is
            # scaled by len_pos (a per-user length-normalization term).
            pos_embedding = torch.mul(self.item_embeddings[positive_x],
                                      positive_mdsk.unsqueeze(2).repeat(1, 1, 32)).sum(1)
            pos_embedding = torch.mul(pos_embedding, len_pos.unsqueeze(1))

            # Generate the initial user embedding from the pooled positives.
            user_embedding = self.generate_layer1(pos_embedding)
            user_embedding = user_embedding.unsqueeze(1)
            # Inner-loop adaptation: a few explicit gradient steps on the user
            # embedding alone, fitting the support set while everything else
            # stays frozen.
            for epoch in range(self.args.local_epoch):
                t1 = time()
                predicted = torch.bmm(user_embedding, self.item_embeddings[supp_x].permute(0, 2, 1)).squeeze(1)
                loss = self.loss_func(predicted, supp_y, user_embedding, self.item_embeddings[supp_x], sup_mdsk, len_pos)
                grad_user_embedding = torch.autograd.grad(loss, user_embedding, retain_graph=True)[0]
                user_embedding = user_embedding - torch.mul(grad_user_embedding, self.local_lr)
                t2 = time()
                ts.append(t2 - t1)
            hr, ndcg = evaluation.metrics_meta(self, user_embedding[0], u, val_negative_dict, val_negative,
                                               self.args.topK, self.item_num, train_data)
            hr_list.extend(hr)
            ndcg_list.extend(ndcg)
            # Restore the global weights before moving to the next user.
            self.load_state_dict(keep_weight)
        return hr_list, ndcg_list
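
The example above follows a common meta-learning pattern: the user embedding is adapted with a handful of explicit gradient steps (torch.autograd.grad plus a manual SGD update) while the rest of the model stays frozen. The sketch below isolates that inner loop under simplified assumptions; adapt_user_embedding, the dot-product scoring, and the BCE loss are illustrative stand-ins, not the project's actual loss_func.

    # Minimal sketch of the inner-loop adaptation step, assuming dot-product
    # scores and a BCE loss; both are stand-ins for the project's loss_func.
    import torch
    import torch.nn.functional as F

    def adapt_user_embedding(user_emb, item_emb, labels, local_lr=0.01, steps=3):
        # user_emb: (1, d) tensor with requires_grad; item_emb: (n, d); labels: (n,)
        for _ in range(steps):
            scores = (user_emb @ item_emb.t()).squeeze(0)  # (n,) dot-product scores
            loss = F.binary_cross_entropy_with_logits(scores, labels)
            grad = torch.autograd.grad(loss, user_emb, retain_graph=True)[0]
            user_emb = user_emb - local_lr * grad          # manual SGD step
        return user_emb

    # Toy usage: adapt a random 32-dim user vector to 5 support items.
    user = torch.randn(1, 32, requires_grad=True)
    items, labels = torch.randn(5, 32), torch.tensor([1., 0., 1., 1., 0.])
    adapted = adapt_user_embedding(user, items, labels)

Because the update is written out explicitly rather than applied through an optimizer, the adapted embedding remains a differentiable function of the initial one, which is what lets an outer (meta) loop backpropagate through the adaptation.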
Example #2
    # Requires: torch, numpy as np, random, and from copy import deepcopy.
    def evaluate(self, train_data, val_negative_dict, val_negative):
        # Snapshot the global weights so every user starts adaptation from the
        # same meta-learned state.
        keep_weight = deepcopy(self.state_dict())
        val_hr_list, val_ndcg_list = [], []
        if self.args.mod == 'train':
            # During training, validate on a random subsample of 1000 users.
            idxs = random.sample(range(len(train_data.support_fill)), 1000)
        else:
            idxs = range(train_data.idx)
        for idx in idxs:
            positive_x, supp_x, supp_y, sup_mdsk, positive_mdsk, _, _, _, len_pos = train_data.get_batch(
                [idx])
            u = train_data.support_fill[idx]['user']
            # Masked, length-normalized pooling of the user's positive items
            # (same pattern as Example #1).
            pos_embedding = torch.mul(self.item_embeddings[positive_x],
                                      positive_mdsk.unsqueeze(2).repeat(1, 1, 32)).sum(1)
            pos_embedding = torch.mul(pos_embedding, len_pos.unsqueeze(1))

            user_embedding = self.generate_layer1(pos_embedding)
            user_embedding = user_embedding.unsqueeze(1)
            # Inner-loop adaptation on the support set.
            for epoch in range(self.args.local_epoch):
                predicted = torch.bmm(user_embedding, self.item_embeddings[supp_x].permute(0, 2, 1)).squeeze(1)
                loss = self.loss_func(predicted, supp_y, user_embedding, self.item_embeddings[supp_x], sup_mdsk, len_pos)
                grad_user_embedding = torch.autograd.grad(loss, user_embedding, retain_graph=True)[0]
                user_embedding = user_embedding - torch.mul(grad_user_embedding, self.local_lr)
            hr, ndcg = evaluation.metrics_meta(self, user_embedding[0], u, val_negative_dict, val_negative,
                                               self.args.topK, self.item_num, train_data)
            val_hr_list.extend(hr)
            val_ndcg_list.extend(ndcg)
            self.load_state_dict(keep_weight)

        return np.mean(val_hr_list), np.mean(val_ndcg_list)
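
The pos_embedding computation in both examples is a masked, length-normalized pooling over a padded batch of item embeddings. A minimal equivalent is sketched below; masked_mean_pool is an illustrative name, and it uses broadcasting where the originals call .repeat(1, 1, 32) (the two are equivalent, and broadcasting avoids materializing the expanded mask).

    # Minimal sketch of masked mean pooling over padded item sets.
    import torch

    def masked_mean_pool(item_emb, mask):
        # item_emb: (batch, max_len, d) padded item embeddings
        # mask:     (batch, max_len), 1.0 for real items, 0.0 for padding
        summed = (item_emb * mask.unsqueeze(2)).sum(dim=1)      # zero padding, then sum
        lengths = mask.sum(dim=1, keepdim=True).clamp(min=1.0)  # guard against empty sets
        return summed / lengths                                 # (batch, d)

    # Toy usage: two users padded to 4 items each, 32-dim embeddings.
    emb = torch.randn(2, 4, 32)
    mask = torch.tensor([[1., 1., 1., 0.],
                         [1., 1., 0., 0.]])
    pooled = masked_mean_pool(emb, mask)  # (2, 32)

In the originals the division is folded into len_pos, which is multiplied rather than divided, so it presumably holds precomputed reciprocal set lengths.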
Example #3
    # Requires: torch, random, from copy import deepcopy, from time import time,
    # plus the project-local evaluation module and tensorToScalar helper.
    def evaluate_test(self, train_data, val_negative_dict, val_negative,
                      sup_dict):
        # Snapshot the global weights so every user starts adaptation from the
        # same meta-learned state.
        keep_weight = deepcopy(self.model.state_dict())
        hr_list, ndcg_list = [], []
        tmp_train_loss = []  # final support losses (collected but never returned)
        ts = []  # per-step adaptation timings
        if self.args.mod == 'train':
            idxs = random.sample(range(len(train_data.support_fill)), 1000)
        else:
            idxs = range(train_data.idx)
        for idx in idxs:
            positive_x, supp_x, supp_y, sup_mdsk, positive_mdsk, query_x, query_y, query_mdsk, len_pos = train_data.get_batch(
                [idx])
            u = train_data.support_fill[idx]['user']
            # Start from a copy of the shared user embedding; the copy keeps
            # requires_grad, so torch.autograd.grad can differentiate through it.
            user_embedding_update = deepcopy(
                self.model.user_embeddings).unsqueeze(0)
            # Inner-loop adaptation on the support set: repeated explicit
            # gradient steps on the user embedding only.
            for step in range(self.args.local_epoch):
                t1 = time()
                loss = self.model.forward(user_embedding_update, supp_x,
                                          supp_y, sup_mdsk)
                grad_user_embedding = torch.autograd.grad(
                    loss, user_embedding_update, retain_graph=True)[0]
                user_embedding_update = user_embedding_update - torch.mul(
                    grad_user_embedding, self.model.local_lr)
                t2 = time()
                ts.append(t2 - t1)  # record per-step timing
            tmp_train_loss.append(tensorToScalar(loss))
            hr, ndcg = evaluation.metrics_meta(self, user_embedding_update[0],
                                               u, val_negative_dict,
                                               val_negative, self.args.topK,
                                               self.item_num, train_data)
            hr_list.extend(hr)
            ndcg_list.extend(ndcg)
            # Restore the global weights before moving to the next user.
            self.model.load_state_dict(keep_weight)
        return hr_list, ndcg_list
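
All three examples delegate the ranking metrics to evaluation.metrics_meta, whose internals are not shown here. For reference, the sketch below computes the standard leave-one-out HR@K and NDCG@K for a single user against sampled negatives; hr_ndcg_at_k and its dot-product scoring are hypothetical stand-ins, not the project's actual implementation.

    # Hypothetical stand-in for evaluation.metrics_meta: rank one held-out
    # positive against sampled negatives by dot-product score.
    import math
    import torch

    def hr_ndcg_at_k(user_emb, pos_item_emb, neg_item_emb, k=10):
        scores = torch.cat([pos_item_emb, neg_item_emb]) @ user_emb  # (1 + n_neg,)
        rank = (scores > scores[0]).sum().item()  # negatives scored above the positive
        hr = 1.0 if rank < k else 0.0
        ndcg = 1.0 / math.log2(rank + 2) if rank < k else 0.0
        return hr, ndcg

    # Toy usage: 32-dim embeddings, 99 sampled negatives.
    user = torch.randn(32)
    pos, negs = torch.randn(1, 32), torch.randn(99, 32)
    print(hr_ndcg_at_k(user, pos, negs, k=10))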