Example #1
class Engine(object):
    def __init__(self):
        self._metron = MetronAtK(top_k=10)

    def evaluate(self, model, evaluate_data, epoch_id):
        # Evaluate model
        model.eval()
        with torch.no_grad():
            test_users, test_items = evaluate_data[0], evaluate_data[1]
            negative_users, negative_items = evaluate_data[2], evaluate_data[3]

            test_users = test_users.cuda()
            test_items = test_items.cuda()
            negative_users = negative_users.cuda()
            negative_items = negative_items.cuda()
            test_scores = model(test_users, test_items)
            negative_scores = model(negative_users, negative_items)

            #to cpu
            test_users = test_users.cpu()
            test_items = test_items.cpu()
            test_scores = test_scores.cpu()
            negative_users = negative_users.cpu()
            negative_items = negative_items.cpu()
            negative_scores = negative_scores.cpu()
            self._metron.subjects = [
                test_users.data.view(-1).tolist(),
                test_items.data.view(-1).tolist(),
                test_scores.data.view(-1).tolist(),
                negative_users.data.view(-1).tolist(),
                negative_items.data.view(-1).tolist(),
                negative_scores.data.view(-1).tolist()
            ]
        hit_ratio, ndcg = self._metron.cal_hit_ratio(), self._metron.cal_ndcg()
        print('[Evaluating Epoch {}] HR = {:.4f}, NDCG = {:.4f}'.format(
            epoch_id + 1, hit_ratio, ndcg))
        return hit_ratio, ndcg
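For orientation, here is a minimal driving sketch for this evaluate-only engine. The stand-in model and the toy tensors are hypothetical, MetronAtK has to come from the surrounding project, and a CUDA device is assumed because evaluate() calls .cuda() unconditionally.

import torch

class DummyScorer(torch.nn.Module):
    # Hypothetical stand-in for a trained recommender:
    # forward(users, items) returns one score per (user, item) pair.
    def forward(self, users, items):
        return torch.rand(users.size(0), 1, device=users.device)

engine = Engine()
evaluate_data = [
    torch.LongTensor([0, 1, 2]),                 # test_users
    torch.LongTensor([10, 11, 12]),              # test_items (one held-out item per user)
    torch.LongTensor([0, 0, 1, 1, 2, 2]),        # negative_users
    torch.LongTensor([20, 21, 22, 23, 24, 25]),  # negative_items (sampled negatives)
]
hit_ratio, ndcg = engine.evaluate(DummyScorer(), evaluate_data, epoch_id=0)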
Example #2
class Engine(object):
    """Meta Engine for training & evaluating NCF model
    """
    def __init__(self, config):

        self.config = config  # model configuration
        self._metron = MetronAtK(top_k=10)
        self._writer = SummaryWriter(log_dir='runs/{}'.format(
            config['alias']))  # tensorboard writer
        self._writer.add_text('config', str(config), 0)
        self.opt = use_optimizer(self.model, config)
        # explicit feedback
        # self.crit = torch.nn.MSELoss()
        # implicit feedback
        self.crit = torch.nn.BCELoss()

    def train_single_batch(self, users, items, ratings):

        assert hasattr(self, 'model'), 'Please specify the exact model !'

        if self.config['use_cuda'] is True:
            users, items, ratings = users.cuda(), items.cuda(), ratings.cuda()

        self.opt.zero_grad()
        ratings_pred = self.model(users, items)

        loss = self.crit(ratings_pred.view(-1), ratings)
        loss.backward()
        self.opt.step()
        loss = loss.item()

        return loss

    def train_an_epoch(self, train_loader, epoch_id):

        assert hasattr(self, 'model'), 'Please specify the exact model !'

        self.model.train()
        total_loss = 0

        for batch_id, batch in enumerate(train_loader):

            assert isinstance(batch[0], torch.LongTensor)

            user, item, rating = batch[0], batch[1], batch[2]
            rating = rating.float()
            loss = self.train_single_batch(user, item, rating)

            print('[Training Epoch {}] Batch {}, Loss {}'.format(
                epoch_id, batch_id, loss))
            total_loss += loss

        self._writer.add_scalar('model/loss', total_loss, epoch_id)

    def evaluate(self, evaluate_data, epoch_id):

        assert hasattr(self, 'model'), 'Please specify the exact model !'

        self.model.eval()
        with torch.no_grad():

            test_users, test_items = evaluate_data[0], evaluate_data[1]
            negative_users, negative_items = evaluate_data[2], evaluate_data[3]

            if self.config['use_cuda'] is True:
                test_users = test_users.cuda()
                test_items = test_items.cuda()

                negative_users = negative_users.cuda()
                negative_items = negative_items.cuda()

            test_scores = self.model(test_users, test_items)
            negative_scores = self.model(negative_users, negative_items)

            if self.config['use_cuda'] is True:

                test_users = test_users.cpu()
                test_items = test_items.cpu()
                test_scores = test_scores.cpu()

                negative_users = negative_users.cpu()
                negative_items = negative_items.cpu()
                negative_scores = negative_scores.cpu()

            self._metron.subjects = [
                test_users.data.view(-1).tolist(),
                test_items.data.view(-1).tolist(),
                test_scores.data.view(-1).tolist(),
                negative_users.data.view(-1).tolist(),
                negative_items.data.view(-1).tolist(),
                negative_scores.data.view(-1).tolist()
            ]

        hit_ratio, ndcg = self._metron.cal_hit_ratio(), self._metron.cal_ndcg()

        self._writer.add_scalar('performance/HR', hit_ratio, epoch_id)
        self._writer.add_scalar('performance/NDCG', ndcg, epoch_id)

        print('[Evaluating Epoch {}] HR = {:.4f}, NDCG = {:.4f}'.format(
            epoch_id, hit_ratio, ndcg))

        return hit_ratio, ndcg

    def save(self, alias, epoch_id, hit_ratio, ndcg):
        assert hasattr(self, 'model'), 'Please specify the exact model !'

        model_dir = self.config['model_dir'].format(alias, epoch_id, hit_ratio,
                                                    ndcg)
        save_checkpoint(self.model, model_dir)
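A hedged end-to-end sketch of how this engine is typically driven. The base class cannot be instantiated on its own (its __init__ reads self.model), so the sketch assumes a subclass in the style of GMFEngine from the yihong-chen NCF repository this code follows; the config keys shown are illustrative, and the loader and evaluate_data layouts mirror what train_an_epoch() and evaluate() above actually read.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy (user, item, implicit rating) triples; the batch assert requires LongTensor users.
users = torch.LongTensor([0, 0, 1, 2])
items = torch.LongTensor([3, 5, 2, 7])
ratings = torch.LongTensor([1, 0, 1, 0])          # cast to float inside train_an_epoch
train_loader = DataLoader(TensorDataset(users, items, ratings),
                          batch_size=2, shuffle=True)

# evaluate() expects [test_users, test_items, negative_users, negative_items].
evaluate_data = [
    torch.LongTensor([0, 1, 2]),
    torch.LongTensor([5, 2, 7]),
    torch.LongTensor([0, 0, 1, 1, 2, 2]),
    torch.LongTensor([4, 6, 4, 6, 4, 6]),
]

config = {
    'alias': 'gmf_demo',
    'use_cuda': False,
    'model_dir': 'checkpoints/{}_Epoch{}_HR{:.4f}_NDCG{:.4f}.model',
    # ... plus whatever keys the concrete model and use_optimizer() require
}
engine = GMFEngine(config)                        # assumed subclass that sets self.model
for epoch in range(3):
    engine.train_an_epoch(train_loader, epoch_id=epoch)
    hit_ratio, ndcg = engine.evaluate(evaluate_data, epoch_id=epoch)
    engine.save(config['alias'], epoch, hit_ratio, ndcg)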
Example #3
class Engine(object):
    """Meta Engine for training & evaluating NCF model

    Note: Subclass should implement self.model !
    """
    def __init__(self, config):
        self.config = config  # model configuration
        self._metron = MetronAtK(top_k=10)
        self._writer = SummaryWriter(log_dir='runs/{}'.format(
            config['alias']))  # tensorboard writer
        self._writer.add_text('config', str(config), 0)
        self.opt = use_optimizer(self.model, config)
        self.crit = torch.nn.BCELoss()
        self.mse = torch.nn.MSELoss()
        self.sparse = False
        if config['friend_item_matrix'].split(".")[-1] == "npz":
            self.friend_item_matrix = scipy.sparse.load_npz(
                config['friend_item_matrix'])
            self.sparse = True
        else:
            self.friend_item_matrix = np.load(config['friend_item_matrix'])

    def shape_friend_ground_truth(self, users, items):
        friend_list = self.model.user_friend_indices[users].cpu().numpy()
        item_list = items.cpu().numpy()
        if self.sparse:
            friend_gt = []
            for friends, item in zip(friend_list, item_list):
                friend_gt.append(
                    (np.array(self.friend_item_matrix[item, friends].todense())
                     / self.friend_item_matrix[item, 0])[0])
        else:
            friend_gt = self.friend_item_matrix[
                item_list[:, None], friend_list] / (
                    self.friend_item_matrix[item_list[:, None], 0] + 1e-12)
        friend_gt = torch.FloatTensor(friend_gt)
        if self.config['use_cuda'] is True:
            friend_gt = friend_gt.cuda()
        return friend_gt

    def train_single_batch(self, users, items, ratings, friend_gt):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        if self.config['use_cuda'] is True:
            users, items, ratings = users.cuda(), items.cuda(), ratings.cuda()
        self.opt.zero_grad()
        ratings_pred, group_idx = self.model(users, items)
        # loss = self.crit(ratings_pred, ratings) + 0.01*self.mse(torch.squeeze(group_idx), friend_gt)
        l2_reg = None
        for n, p in self.model.named_parameters():
            if "attention" not in n:
                if l2_reg is None:
                    l2_reg = p.norm(2)
                else:
                    l2_reg = l2_reg + p.norm(2)

        loss = self.crit(ratings_pred, ratings) + l2_reg * self.config[
            'l2_other'] + self.config['alpha'] * self.crit(
                torch.squeeze(group_idx).view(-1), friend_gt.view(-1))
        loss.backward()
        self.opt.step()
        loss = loss.item()
        return loss

    def train_an_epoch(self, train_loader, epoch_id):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.train()
        total_loss = 0
        t1 = time()
        for batch_id, batch in enumerate(train_loader):
            assert isinstance(batch[0], torch.LongTensor)
            user, item, rating = Variable(batch[0]), Variable(
                batch[1]), Variable(batch[2])
            rating = rating.float()

            if user.size(0) != self.model.config['batch_size']:
                break

            if self.config['use_cuda'] is True:
                user = user.cuda()
                item = item.cuda()
                rating = rating.cuda()
            loss = self.train_single_batch(
                user, item, rating, self.shape_friend_ground_truth(user, item))
            if batch_id % 1000 == 0:
                t2 = time()
                print(
                    '[Training Epoch {}] Batch {}, Loss {:.4f}, Time {:.2f} '.
                    format(epoch_id, batch_id, loss, t2 - t1))
                t1 = time()
                print(torch.norm(self.model.attention_item.weight))
            total_loss += loss

        self._writer.add_scalar('model/loss', total_loss, epoch_id)

    def test_epoch(self, test_users, test_items):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.eval()
        with torch.no_grad():
            batch_size = self.model.config['batch_size']
            batch_num = (len(test_users) // batch_size) + 1
            score = []
            for i in range(batch_num):
                test_user_batch, test_item_batch = test_users[
                    batch_size * i:batch_size *
                    (i + 1)], test_items[batch_size * i:batch_size * (i + 1)]
                score_batch = self.model(test_user_batch, test_item_batch)[0]
                score_batch = score_batch if len(
                    score_batch.size()) else score_batch.unsqueeze(-1)
                score.append(score_batch)
        return torch.cat(score, -1)

    def evaluate(self, evaluate_data, epoch_id, save=True):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.eval()
        test_users, test_items = Variable(evaluate_data[0]), Variable(
            evaluate_data[1])
        negative_users, negative_items = Variable(evaluate_data[2]), Variable(
            evaluate_data[3])
        test_keys, negative_keys = evaluate_data[4], evaluate_data[5]
        if self.config['use_cuda'] is True:
            test_users = test_users.cuda()
            test_items = test_items.cuda()
            negative_users = negative_users.cuda()
            negative_items = negative_items.cuda()
        test_scores = self.test_epoch(test_users, test_items)
        negative_scores = self.test_epoch(negative_users, negative_items)
        if self.config['use_cuda'] is True:
            test_users = test_users.cpu()
            test_items = test_items.cpu()
            test_scores = test_scores.cpu()
            negative_users = negative_users.cpu()
            negative_items = negative_items.cpu()
            negative_scores = negative_scores.cpu()
        self._metron.subjects = [
            test_users.data.view(-1).tolist(),
            test_items.data.view(-1).tolist(),
            test_scores.data.view(-1).tolist(),
            negative_users.data.view(-1).tolist(),
            negative_items.data.view(-1).tolist(),
            negative_scores.data.view(-1).tolist(), test_keys, negative_keys
        ]
        hit_ratio, ndcg = self._metron.cal_hit_ratio(), self._metron.cal_ndcg()
        if save:
            self._writer.add_scalar('performance/HR', hit_ratio, epoch_id)
            self._writer.add_scalar('performance/NDCG', ndcg, epoch_id)
        print('[Evaluating Epoch {}] HR = {:.4f}, NDCG = {:.4f}'.format(
            epoch_id, hit_ratio, ndcg))
        return hit_ratio, ndcg

    def save(self, alias, epoch_id, hit_ratio, ndcg):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        model_dir = self.config['model_dir'].format(alias, epoch_id, hit_ratio,
                                                    ndcg)
        save_checkpoint(self.model, model_dir)
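The dense branch of shape_friend_ground_truth above divides the per-friend entries of friend_item_matrix by column 0 of the same item row, so column 0 apparently serves as the normaliser (e.g. the item's total count) and the result is each friend's share for that item. A small self-contained illustration of that indexing, with made-up numbers:

import numpy as np

# Toy dense matrix: one row per item; column 0 is the normaliser,
# the remaining columns hold per-friend counts (illustrative values only).
friend_item_matrix = np.array([
    [4., 2., 1., 1.],
    [2., 0., 2., 0.],
])
item_list = np.array([0, 1])              # one item per training example
friend_list = np.array([[1, 2], [2, 3]])  # friend indices per training example

friend_gt = friend_item_matrix[item_list[:, None], friend_list] / (
    friend_item_matrix[item_list[:, None], 0] + 1e-12)
print(friend_gt)   # [[0.5, 0.25], [1.0, 0.0]] -- each friend's share of the item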
Example #4
class Engine(object):
    """Meta Engine for training & evaluating NCF model

    Note: Subclass should implement self.model !
    """
    def __init__(self, config):
        self.config = config  # model configuration
        self._metron = MetronAtK()
        self._writer = SummaryWriter(log_dir='runs/{}'.format(
            config['alias']))  # tensorboard writer
        self._writer.add_text('config', str(config), 0)
        self.opt = use_optimizer(self.model, config)
        if not config['implicit']:
            # explicit feedback
            self.crit = torch.nn.MSELoss()
        else:
            # implicit feedback
            self.crit = torch.nn.BCEWithLogitsLoss()

    def train_single_batch(self, users, items, ratings):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        if self.config['use_cuda'] is True:
            users, items, ratings = users.cuda(), items.cuda(), ratings.cuda()
        self.opt.zero_grad()
        ratings_logits = self.model(users, items)
        # print (ratings_pred)
        # print (ratings)
        loss = self.crit(ratings_logits.view(-1), ratings)
        loss.backward()

        self.opt.step()
        loss = loss.item()
        return loss

    def train_an_epoch(self, train_loader, epoch_id):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.train()
        total_loss = 0
        for batch_id, batch in enumerate(train_loader):
            assert isinstance(batch[0], torch.LongTensor)
            users, items, ratings = batch[0], batch[1], batch[2]
            ratings = ratings.float()
            loss = self.train_single_batch(users, items, ratings)

            print('[Training Epoch {}] Batch {}, Loss {}'.format(
                epoch_id, batch_id, loss))
            total_loss += loss
        self._writer.add_scalar('model/loss', total_loss, epoch_id)

    def evaluate(self, evaluate_data, train_negatives, epoch_id):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.eval()
        with torch.no_grad():
            test_users, test_items = evaluate_data[0], evaluate_data[1]
            negative_users, negative_items = evaluate_data[2], evaluate_data[3]
            test_weights, gold_scores = evaluate_data[4], evaluate_data[5]

            if self.config['use_cuda'] is True:
                test_users = test_users.cuda()
                test_items = test_items.cuda()
                negative_users = negative_users.cuda()
                negative_items = negative_items.cuda()
            test_scores = self.model(test_users, test_items)
            negative_scores = self.model(negative_users, negative_items)
            if self.config['use_cuda'] is True:
                test_users = test_users.cpu()
                test_items = test_items.cpu()
                test_scores = test_scores.cpu()
                negative_users = negative_users.cpu()
                negative_items = negative_items.cpu()
                negative_scores = negative_scores.cpu()
            # print (test_scores.data.view(-1).tolist())
            self._metron.subjects = [
                test_users.data.view(-1).tolist(),
                test_items.data.view(-1).tolist(),
                test_scores.data.view(-1).tolist(),
                negative_users.data.view(-1).tolist(),
                negative_items.data.view(-1).tolist(),
                negative_scores.data.view(-1).tolist(),
                test_weights.data.view(-1).tolist(),
                gold_scores.data.view(-1).tolist()
            ]

        print('[Evaluating Epoch {}]'.format(epoch_id))
        basic_metric = 0
        if not self.config['implicit']:
            mae = self._metron.cal_basic_metric(train_negatives, 'mae')
            print('mae = {:.4f}'.format(mae))
            basic_metric = mae
        else:
            acc = self._metron.cal_basic_metric(train_negatives, 'acc')
            print('acc = {:.4f}'.format(acc))
            basic_metric = acc
        auc = self._metron.cal_basic_metric(train_negatives, 'auc')
        print('auc = {:.4f}'.format(auc))
        #hit_ratio1  = self._metron.cal_hit_ratio(1)
        hit_ratio5 = self._metron.cal_hit_ratio(5)
        hit_ratio10 = self._metron.cal_hit_ratio(10)
        #ndcg1 =  self._metron.cal_ndcg(1)
        ndcg5 = self._metron.cal_ndcg(5)
        ndcg10 = self._metron.cal_ndcg(10)
        print('HR@5 = {:.4f}, HR@10 = {:.4f}, '
              'NDCG@5 = {:.4f}, NDCG@10 = {:.4f}'.format(
                  hit_ratio5, hit_ratio10, ndcg5, ndcg10))
        # self._writer.add_scalar('performance/HR', hit_ratio, epoch_id)
        # self._writer.add_scalar('performance/NDCG', ndcg, epoch_id)
        return basic_metric, auc, hit_ratio5, hit_ratio10, ndcg5, ndcg10

    def save(self, alias, epoch_id, hit_ratio, ndcg):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        model_dir = self.config['model_dir'].format(alias, epoch_id, hit_ratio,
                                                    ndcg)
        save_checkpoint(self.model, model_dir)

    def load(self, alias, epoch_id, hit_ratio, ndcg):
        model_dir = self.config['model_dir'].format(alias, epoch_id, hit_ratio,
                                                    ndcg)
        device_id = 0
        if self.config['use_cuda'] is True:
            device_id = self.config['device_id']
        resume_checkpoint(self.model, model_dir=model_dir, device_id=device_id)
        return self.model
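Unlike the earlier engines, this variant feeds raw model outputs to BCEWithLogitsLoss instead of sigmoid-activated scores to BCELoss. Both formulations compute the same loss, but the fused version is numerically more stable for large-magnitude logits; a quick self-contained check:

import torch

logits = torch.tensor([2.3, -1.1, 0.4])
targets = torch.tensor([1.0, 0.0, 1.0])

fused = torch.nn.BCEWithLogitsLoss()(logits, targets)
manual = torch.nn.BCELoss()(torch.sigmoid(logits), targets)
print(torch.allclose(fused, manual))   # True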
Example #5
class GMF(pl.LightningModule):
    """
    Architecture from: https://github.com/yihong-chen/neural-collaborative-filtering
    Input: User tensor and Item tensor for Cross-entropy loss //
           user tensor, positive item tensor and negative item tensor for BPR loss
    Output: predicted interactions in logits (toget probability use a sigmoid function on the output)
    """

    def __init__(self, config):
        super(GMF, self).__init__()
        self.config = config
        self._metron = MetronAtK(top_k=config['topk'])
        self.crit = torch.nn.BCEWithLogitsLoss()

        self.num_users = config['num_users']
        self.num_items = config['num_items']
        self.latent_dim = config['latent_dim']

        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)

        self.affine_output = torch.nn.Linear(in_features=self.latent_dim, out_features=1)
        self.bpr = config['BPR_loss']
        self.logsigmoid = torch.nn.LogSigmoid()

    def forward(self, user_indices, item_indices):
        user_embedding = self.embedding_user(user_indices)
        item_embedding = self.embedding_item(item_indices)
        element_product = torch.mul(user_embedding, item_embedding)
        logits = self.affine_output(element_product)
        return logits

    def use_optimizer(self, config):
        optimizer = torch.optim.Adam(self.parameters(),
                                     lr=config['adam_lr'],
                                     weight_decay=config['l2_regularization'])

        return optimizer


    def training_step(self, batch, batch_idx):
        if self.bpr == 1:
            user, items_pos, items_neg = batch[0], batch[1], batch[2]
            ratings_pos = self(user, items_pos)
            rating_neg = self(user, items_neg)
            xij = ratings_pos - rating_neg
            loss = self.logsigmoid(xij)
            loss = -loss.sum()
        else:
            user, items, ratings = batch[0], batch[1], batch[2]
            ratings_pred = self(user, items)
            loss = self.crit(ratings_pred.view(-1), ratings)

        return {'loss': loss}

    def validation_step(self, batch, batch_idx):
        test_users, test_items, test_true = batch[0], batch[1], batch[2]
        test_pred = self(test_users, test_items)
        loss = self.crit(test_pred.view(-1), test_true)
        return {'val_loss': loss, 'test_users': test_users.detach(), 'test_items': test_items.detach(),
                'test_pred': test_pred.sigmoid().detach(), 'test_true': test_true.detach()}

    def configure_optimizers(self):
        optimizer = self.use_optimizer(self.config)
        scheduler = ReduceLROnPlateau(optimizer, 'min', patience=500, factor=0.2, min_lr=1e-8)
        return [optimizer], [scheduler]

    def validation_epoch_end(self, outputs):
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        test_users = torch.cat([x['test_users'] for x in outputs])
        test_items = torch.cat([x['test_items'] for x in outputs])
        test_pred = torch.cat([x['test_pred'] for x in outputs])
        test_true = torch.cat([x['test_true'] for x in outputs])

        self._metron.subjects = [test_users.data.view(-1).tolist(),
                                 test_items.data.view(-1).tolist(),
                                 test_pred.data.view(-1).tolist(),
                                 test_true.data.view(-1).tolist()]

        hit_ratio, ndcg = self._metron.cal_hit_ratio(), self._metron.cal_ndcg_implicit()
        print('[Evaluating Epoch {}] HR = {:.4f}, NDCG = {:.4f}'.format(self.current_epoch, hit_ratio, ndcg))
        log = {'val_loss': avg_loss, 'HR': hit_ratio, 'NDCG': ndcg}

        return {'log': log, 'val_loss': ndcg, 'HR': hit_ratio}


    def init_weight(self):
        pass
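A hedged construction-and-forward sketch for the GMF module above. The config keys are exactly the ones the class reads, the values are illustrative, and MetronAtK must be importable from the surrounding project:

import torch

config = {
    'topk': 10,
    'num_users': 100,
    'num_items': 500,
    'latent_dim': 8,
    'BPR_loss': 0,                 # 1 switches training_step to the BPR branch
    'adam_lr': 1e-3,
    'l2_regularization': 1e-6,
}
model = GMF(config)

users = torch.LongTensor([0, 1, 2])
items = torch.LongTensor([10, 20, 30])
logits = model(users, items)       # shape (3, 1), raw logits
probs = torch.sigmoid(logits)      # probabilities, as the class docstring notes

Training would normally go through pytorch_lightning.Trainer.fit with dataloaders yielding (user, item, rating) batches, or (user, positive_item, negative_item) batches when config['BPR_loss'] == 1.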
Example #6
class Engine(object):
    """Meta Engine for training & evaluating NCF model

    Note: Subclass should implement self.model !
    """

    def __init__(self, config):
        self.config = config  # model configuration
        self._metron_s = MetronAtK(top_k=config['top_k'])
        self._metron_t = MetronAtK(top_k=config['top_k'])
        self._writer = SummaryWriter(log_dir='runs/{}'.format(config['alias']))  # tensorboard writer
        self._writer.add_text('config', str(config), 0)
        self.opt = use_optimizer(self.model, config)
        # explicit feedback
        # self.crit = torch.nn.MSELoss()
        # implicit feedback
        self.crit = torch.nn.BCELoss()

    def train_single_batch(self, users, items_s, ratings_s, items_t, ratings_t, item_vec_s, item_vec_t):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        if self.config['use_cuda'] is True:
            users, items_s, ratings_s, items_t, ratings_t = users.cuda(), items_s.cuda(), ratings_s.cuda(), items_t.cuda(), ratings_t.cuda()
            item_vec_s, item_vec_t = item_vec_s.cuda(), item_vec_t.cuda()
        self.opt.zero_grad()
        ratings_pred_s, ratings_pred_t = self.model(users, items_s, items_t, item_vec_s, item_vec_t)
        loss = self.crit(ratings_pred_s.view(-1), ratings_s) + self.crit(ratings_pred_t.view(-1), ratings_t)
        if self.config['weight_decay']:
            reg_loss = 0
            for p in self.model.transfer_layers.parameters():
                reg_loss += torch.sum(torch.abs(p))
            loss += self.config['weight_decay'] * reg_loss
        loss.backward()
        self.opt.step()
        loss = loss.item()
        return loss

    def train_an_epoch(self, train_loader, epoch_id):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.train()
        total_loss = 0
        for batch_id, batch in enumerate(train_loader):
            assert isinstance(batch[0], torch.LongTensor)
            user, item_s, rating_s, item_t, rating_t = batch[0], batch[1], batch[2], batch[3], batch[4]
            item_vec_s = batch[5]
            item_vec_t = batch[6]
            rating_s = rating_s.float()
            rating_t = rating_t.float()
            loss = self.train_single_batch(user, item_s, rating_s, item_t, rating_t, item_vec_s, item_vec_t)
            if batch_id % 1000 == 0:
                print('[Training Epoch {}] Batch {}, Loss {}'.format(epoch_id, batch_id, loss))
            total_loss += loss
        print('[Training Epoch {}] Average Loss {}'.format(epoch_id, total_loss / (batch_id + 1)))
        self._writer.add_scalar('model/loss', total_loss, epoch_id)

    def evaluate(self, evaluate_data, epoch_id):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.eval()
        with torch.no_grad():
            test_users, test_items_s, test_items_t = evaluate_data[0], evaluate_data[1],evaluate_data[2]
            #
            item_vecs_s, item_vecs_t = evaluate_data[3], evaluate_data[4]
            negative_users, negative_items_s, negative_items_t = evaluate_data[5], evaluate_data[6], evaluate_data[7]
            #
            neg_item_vecs_s, neg_item_vecs_t = evaluate_data[8], evaluate_data[9]
            if self.config['use_cuda'] is True:
                test_users = test_users.cuda()
                test_items_s = test_items_s.cuda()
                test_items_t = test_items_t.cuda()
                negative_users = negative_users.cuda()
                negative_items_s = negative_items_s.cuda()
                negative_items_t = negative_items_t.cuda()
                #
                item_vecs_s, item_vecs_t = item_vecs_s.cuda(), item_vecs_t.cuda()
                neg_item_vecs_s, neg_item_vecs_t = neg_item_vecs_s.cuda(), neg_item_vecs_t.cuda()

            test_scores_s, test_scores_t = self.model(test_users, test_items_s, test_items_t, item_vecs_s, item_vecs_t)
            negative_scores_s, negative_scores_t = self.model(negative_users, negative_items_s, negative_items_t, neg_item_vecs_s, neg_item_vecs_t)
            if self.config['use_cuda'] is True:
                test_users = test_users.cpu()
                test_items_s = test_items_s.cpu()
                test_items_t = test_items_t.cpu()
                negative_users = negative_users.cpu()
                negative_items_s = negative_items_s.cpu()
                negative_items_t = negative_items_t.cpu()
                test_scores_s = test_scores_s.cpu()
                test_scores_t = test_scores_t.cpu()
                negative_scores_s, negative_scores_t = negative_scores_s.cpu(), negative_scores_t.cpu()
            self._metron_s.subjects = [test_users.data.view(-1).tolist(),
                                 test_items_s.data.view(-1).tolist(),
                                 test_scores_s.data.view(-1).tolist(),
                                 negative_users.data.view(-1).tolist(),
                                 negative_items_s.data.view(-1).tolist(),
                                 negative_scores_s.data.view(-1).tolist()]
            self._metron_t.subjects = [test_users.data.view(-1).tolist(),
                                       test_items_t.data.view(-1).tolist(),
                                       test_scores_t.data.view(-1).tolist(),
                                       negative_users.data.view(-1).tolist(),
                                       negative_items_t.data.view(-1).tolist(),
                                       negative_scores_t.data.view(-1).tolist()]
        hit_ratio_s, ndcg_s, mrr_s = self._metron_s.cal_hit_ratio(), self._metron_s.cal_ndcg(), self._metron_s.cal_mrr()
        hit_ratio_t, ndcg_t, mrr_t = self._metron_t.cal_hit_ratio(), self._metron_t.cal_ndcg(), self._metron_t.cal_mrr()

        self._writer.add_scalar('performance/HR_s', hit_ratio_s, epoch_id)
        self._writer.add_scalar('performance/NDCG_s', ndcg_s, epoch_id)
        self._writer.add_scalar('performance/MRR_s', mrr_s, epoch_id)
        self._writer.add_scalar('performance/HR_t', hit_ratio_t, epoch_id)
        self._writer.add_scalar('performance/NDCG_t', ndcg_t, epoch_id)
        self._writer.add_scalar('performance/MRR_t', mrr_t, epoch_id)
        print('[Evaluating Epoch {}] HR_s = {:.4f}, NDCG_s = {:.4f}, MRR_s = {:.4f}, HR_t = {:.4f}, NDCG_t = {:.4f}, MRR_t = {:.4f}'.format(
            epoch_id, hit_ratio_s, ndcg_s, mrr_s, hit_ratio_t, ndcg_t, mrr_t))
        return hit_ratio_s, ndcg_s, mrr_s, hit_ratio_t, ndcg_t, mrr_t

    def save(self, alias, epoch_id, hit_ratio_s, ndcg_s,hit_ratio_t, ndcg_t):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        model_dir = self.config['model_dir'].format(alias, epoch_id, hit_ratio_s, ndcg_s,hit_ratio_t, ndcg_t)
        save_checkpoint(self.model, model_dir)
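For readability, the positional layout that evaluate() above expects in evaluate_data, taken directly from the unpacking at the top of the method:

# evaluate_data[0] test_users        evaluate_data[5] negative_users
# evaluate_data[1] test_items_s      evaluate_data[6] negative_items_s
# evaluate_data[2] test_items_t      evaluate_data[7] negative_items_t
# evaluate_data[3] item_vecs_s       evaluate_data[8] neg_item_vecs_s
# evaluate_data[4] item_vecs_t       evaluate_data[9] neg_item_vecs_t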
Example #7
class Engine(object):
    """Meta Engine for training & evaluating NCF model

    Note: Subclass should implement self.model !
    """
    def __init__(self, config):
        self.config = config  # model configuration
        self._metron = MetronAtK(top_k=10)
        self.opt = use_optimizer(self.model, config)
        self.model_name = config['model']
        # explicit feedback
        # self.crit = torch.nn.MSELoss()
        # implicit feedback
        if self.model_name == 'MF':
            self.crit = torch.nn.MSELoss()
        else:
            self.crit = torch.nn.BCELoss()

    def train_single_batch(self, users, items, ratings):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        if self.config['use_cuda'] is True:
            users, items, ratings = users.cuda(), items.cuda(), ratings.cuda()
        self.opt.zero_grad()
        ratings_pred = self.model(users, items)
        loss = self.crit(ratings_pred.view(-1), ratings)
        loss.backward()
        self.opt.step()
        loss = loss.item()
        return loss

    def train_an_epoch(self, train_loader, epoch_id):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.train()
        total_loss = 0
        for batch_id, batch in enumerate(train_loader):
            assert isinstance(batch[0], torch.LongTensor)
            user, item, rating = batch[0], batch[1], batch[2]
            rating = rating.float()
            self.loss = self.train_single_batch(user, item, rating)
            if batch_id % 500 == 0:
                print('[Training Epoch {}] Batch {}, Loss {}'.format(
                    epoch_id, batch_id, self.loss))
            total_loss += self.loss

    def evaluate(self, evaluate_data, epoch_id):
        assert hasattr(self, 'model'), 'Please specify the exact model !'
        self.model.eval()
        with torch.no_grad():
            test_users, test_items = evaluate_data[0], evaluate_data[1]
            negative_users, negative_items = evaluate_data[2], evaluate_data[3]
            if self.config['use_cuda'] is True:
                test_users = test_users.cuda()
                test_items = test_items.cuda()
                negative_users = negative_users.cuda()
                negative_items = negative_items.cuda()
            test_scores = self.model(test_users, test_items)
            negative_scores = self.model(negative_users, negative_items)
            if self.config['use_cuda'] is True:
                test_users = test_users.cpu()
                test_items = test_items.cpu()
                test_scores = test_scores.cpu()
                negative_users = negative_users.cpu()
                negative_items = negative_items.cpu()
                negative_scores = negative_scores.cpu()
            self._metron.subjects = [
                test_users.data.view(-1).tolist(),
                test_items.data.view(-1).tolist(),
                test_scores.data.view(-1).tolist(),
                negative_users.data.view(-1).tolist(),
                negative_items.data.view(-1).tolist(),
                negative_scores.data.view(-1).tolist()
            ]
        hit_ratio, ndcg = self._metron.cal_hit_ratio(), self._metron.cal_ndcg()
        print('[Evaluating Epoch {}] HR = {:.4f}, NDCG = {:.4f}'.format(
            epoch_id, hit_ratio, ndcg))
        return hit_ratio, ndcg
Example #8
class Engine(object):
    """
    Meta Engine for training & evaluating NCF model
    """
    def __init__(self, config):
        """
        Function to initialize the engine
        :param config: configuration dictionary
        """
        self.config = config  # model configuration
        self._metron = MetronAtK(top_k=10)  # Metrics for Top-10
        self._writer = SummaryWriter(log_dir='runs/{}'.format(
            config['alias']))  # Tensorboard Writer
        self._writer.add_text('config', str(config),
                              0)  # String output for Tensorboard Writer
        self.opt = use_optimizer(self.model, config)  # set optimizer

        # self.crit = torch.nn.MSELoss() # mean squared error loss for explicit feedback
        self.crit = torch.nn.BCELoss(
        )  # binary cross entropy loss for implicit feedback

    def train_single_batch(self, users, items, ratings):
        """
        Function to train a single batch with back-propagation
        :param users: user data
        :param items: item data
        :param ratings: rating data
        :return: Loss value
        """

        assert hasattr(self, 'model'), 'Please specify the exact model !'

        # if self.config['use_cuda'] is True:
        #     users, items, ratings = users.cuda(), items.cuda(), ratings.cuda()

        self.opt.zero_grad()
        ratings_pred = self.model(users, items)

        # Get the loss with the choice of pre-defined loss function
        loss = self.crit(ratings_pred.view(-1), ratings)
        # Back-propagate the loss
        loss.backward()
        # Optimize the loss
        self.opt.step()
        # Get the final loss
        loss = loss.item()

        return loss

    def train_an_epoch(self, train_loader, epoch_id):
        """
        Function to train a single epoch
        :param train_loader: a Loader class for the training data
        :param epoch_id: current epoch
        :return:
        """
        assert hasattr(self, 'model'), 'Please specify the exact model !'

        # Initialize training mode for current model
        self.model.train()
        # Initialize total loss
        total_loss = 0

        # Loop through batches in the training data
        for batch_id, batch in enumerate(train_loader):
            assert isinstance(batch[0], torch.LongTensor)

            # Get user, item, and rating data
            user, item, rating = batch[0], batch[1], batch[2]
            rating = rating.float()

            # Train a single batch
            loss = self.train_single_batch(user, item, rating)

            print('[Training Epoch {}] Batch {}, Loss {}'.format(
                epoch_id, batch_id, loss))
            # Add up total loss
            total_loss += loss

        # Save the loss values to be displayed on TensorBoard
        self._writer.add_scalar('model/loss', total_loss, epoch_id)

    def evaluate(self, evaluate_data, epoch_id):
        """
        Function to evaluate the model on test data
        :param evaluate_data: data array to be evaluated
        :param epoch_id: current epoch
        :return: values of Hit Ratio and NDCG metrics
        """
        assert hasattr(self, 'model'), 'Please specify the exact model !'

        # Initialize evaluation mode for current model
        self.model.eval()

        # Use 'no_grad' to reduce the memory usage and speed up computations (no Gradient Calculation)
        with torch.no_grad():
            # Get test user and test item data
            test_users, test_items = evaluate_data[0], evaluate_data[1]
            # Get negative user and negative item data
            negative_users, negative_items = evaluate_data[2], evaluate_data[3]

            # if self.config['use_cuda'] is True:
            #     test_users = test_users.cuda()
            #     test_items = test_items.cuda()
            #
            #     negative_users = negative_users.cuda()
            #     negative_items = negative_items.cuda()

            # Calculate test scores
            test_scores = self.model(test_users, test_items)
            # Calculate negative scores
            negative_scores = self.model(negative_users, negative_items)

            # if self.config['use_cuda'] is True:
            #
            #     test_users = test_users.cpu()
            #     test_items = test_items.cpu()
            #     test_scores = test_scores.cpu()
            #
            #     negative_users = negative_users.cpu()
            #     negative_items = negative_items.cpu()
            #     negative_scores = negative_scores.cpu()

            self._metron.subjects = [
                test_users.data.view(-1).tolist(),
                test_items.data.view(-1).tolist(),
                test_scores.data.view(-1).tolist(),
                negative_users.data.view(-1).tolist(),
                negative_items.data.view(-1).tolist(),
                negative_scores.data.view(-1).tolist()
            ]

        # Calculate Hit Ratio and NDCG values
        hit_ratio, ndcg = self._metron.cal_hit_ratio(), self._metron.cal_ndcg()

        # Save the HR and NDCG values to be displayed on TensorBoard writer
        self._writer.add_scalar('performance/HR', hit_ratio, epoch_id)
        self._writer.add_scalar('performance/NDCG', ndcg, epoch_id)

        print('[Evaluating Epoch {}] HR = {:.4f}, NDCG = {:.4f}'.format(
            epoch_id, hit_ratio, ndcg))

        return hit_ratio, ndcg

    def save(self, alias, epoch_id, hit_ratio, ndcg):
        """
        Function to save information for every run
        :param alias: alias info
        :param epoch_id: current epoch
        :param hit_ratio: value of Hit Ratio metric
        :param ndcg: value of NDCG metric
        """
        assert hasattr(self, 'model'), 'Please specify the exact model !'

        # Choose the model directory where the model will be saved
        model_dir = self.config['model_dir'].format(alias, epoch_id, hit_ratio,
                                                    ndcg)
        # Save the model
        save_checkpoint(self.model, model_dir)
Example #9
class Engine(object):
    def __init__(self):
        self._metron = MetronAtK(top_k=10)
        
    def evaluate(self, model, test_loader, test_neg_loader, epoch_id, **kwargs):
        # Evaluate model
        a = time.time()
        model.eval()
 
        t_test_users=[]
        t_negative_users=[]
        t_test_items=[]
        t_negative_items=[]
        test_score=[]
        negative_score=[]
        dataloader_iterator = iter(test_loader)
        
        for i, data1 in enumerate(test_neg_loader):

            try:
                data2 = next(dataloader_iterator)
                with torch.no_grad():    

                    test_positives_users, test_positives_pos, test_positives_neg, test_positives_image = data2
                    test_negatives_users, test_negatives_pos, test_negatives_neg, test_negatives_image = data1

                    test_scores = model(test_positives_users, test_positives_pos, test_positives_neg, test_positives_image)[1]
                    negative_scores = model(test_negatives_users, test_negatives_pos, test_negatives_neg, test_negatives_image)[1]

                    test_scores = test_scores.cpu()
                    negative_scores = negative_scores.cpu()

                    t_test_users.extend(test_positives_users.detach().numpy())
                    t_test_items.extend(test_positives_pos.detach().numpy())
                    t_negative_users.extend(test_negatives_users.detach().numpy())
                    t_negative_items.extend(test_negatives_pos.detach().numpy())
                    test_score.extend(test_scores.detach().numpy())
                    negative_score.extend(negative_scores.detach().numpy())

            except StopIteration:
                with torch.no_grad():
                    negative_users, negative_items = data1

                    negative_scores = model(negative_users, negative_items)
                    negative_scores = negative_scores.cpu()

                    t_negative_users.extend(negative_users.detach().numpy())
                    t_negative_items.extend(negative_items.detach().numpy())
                    negative_score.extend(negative_scores.detach().numpy())

        self._metron.subjects = [
            t_test_users,
            t_test_items,
            test_score,
            t_negative_users,
            t_negative_items,
            negative_score
        ]
        hit_ratio, ndcg = self._metron.cal_hit_ratio(), self._metron.cal_ndcg()
        print('[Evaluating Epoch {}] HR = {:.4f}, NDCG = {:.4f}'.format(
            epoch_id + 1, hit_ratio, ndcg))

        b = time.time()
        print("evaluate time:", b - a)
        return hit_ratio, ndcg
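The evaluation above walks the positive test loader and the negative test loader in lockstep via iter()/next(), falling back to a negatives-only branch once the positive loader is exhausted. A toy, self-contained illustration of that pairing pattern (the loader contents are made up):

from torch.utils.data import DataLoader

loader_pos = DataLoader(list(range(4)), batch_size=2)   # shorter loader (positives)
loader_neg = DataLoader(list(range(10)), batch_size=2)  # longer loader (negatives)

pos_iter = iter(loader_pos)
for i, neg_batch in enumerate(loader_neg):
    try:
        pos_batch = next(pos_iter)
        print(i, pos_batch.tolist(), neg_batch.tolist())      # paired branch
    except StopIteration:
        print(i, 'positives exhausted', neg_batch.tolist())   # negatives-only branch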