Example #1
import torch

import utils
import world  # project-local modules: config, device, tensorboard flag


def BPR_train_original(dataset, recommend_model, loss_class, epoch, neg_k=1, w=None):
    Recmodel = recommend_model
    Recmodel.train()
    bpr: utils.BPRLoss = loss_class
    allusers = list(range(dataset.n_users))
    S, sam_time = utils.UniformSample_original(allusers, dataset)
    print(f"BPR[sample time][{sam_time[0]:.1f}={sam_time[1]:.2f}+{sam_time[2]:.2f}]")
    users = torch.Tensor(S[:, 0]).long()
    posItems = torch.Tensor(S[:, 1]).long()
    negItems = torch.Tensor(S[:, 2]).long()

    users = users.to(world.device)
    posItems = posItems.to(world.device)
    negItems = negItems.to(world.device)
    users, posItems, negItems = utils.shuffle(users, posItems, negItems)
    total_batch = (len(users) + world.config['bpr_batch_size'] - 1) // world.config['bpr_batch_size']  # ceil division
    aver_loss = 0.
    for batch_i, (batch_users, batch_pos, batch_neg) in enumerate(
            utils.minibatch(users, posItems, negItems,
                            batch_size=world.config['bpr_batch_size'])):
        cri = bpr.stageOne(batch_users, batch_pos, batch_neg)
        aver_loss += cri
        if world.tensorboard:
            w.add_scalar('BPRLoss/BPR', cri,
                         epoch * int(len(users) / world.config['bpr_batch_size']) + batch_i)
    aver_loss = aver_loss / total_batch
    return f"[BPR[aver loss{aver_loss:.3e}]"
Example #2
import os

import numpy as np
import torch
from torch import nn

import utils
import world  # project-local modules; `args` (parsed config) is assumed in scope


# Method excerpted from a trainer class: sample (user, pos, neg) triples for
# one epoch, caching the result on disk so repeated runs reuse the same sample.
def _sample_train_data_at(self, epoch):
    epoch_path = self.train_temp + 'S_epoch_' + str(epoch) + '.npy'
    if os.path.exists(epoch_path):
        S = np.load(epoch_path)
    else:
        allusers = list(range(self.dataset.n_users))
        S, sam_time = utils.UniformSample_original(allusers, self.dataset)
        print(f"BPR[sample time][{sam_time[0]:.1f}={sam_time[1]:.2f}+{sam_time[2]:.2f}]")
        np.save(epoch_path, S)
    users = torch.Tensor(S[:, 0]).long().to(world.device)
    posItems = torch.Tensor(S[:, 1]).long().to(world.device)
    negItems = torch.Tensor(S[:, 2]).long().to(world.device)
    return users, posItems, negItems
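

# -- Sketch: what `utils.UniformSample_original` is assumed to do -----------
# Not the project's actual implementation. It is taken to return an (n, 3)
# array of (user, positive item, negative item) triples drawn uniformly;
# `all_pos[u]` holds the list of items user u has interacted with.
def _uniform_sample_sketch(all_pos, n_users, n_items, n_samples):
    triples = []
    for u in np.random.randint(0, n_users, n_samples):
        pos = all_pos[u]
        if len(pos) == 0:
            continue
        pos_item = pos[np.random.randint(len(pos))]
        while True:
            neg_item = np.random.randint(n_items)
            if neg_item not in pos:  # rejection-sample an unobserved item
                break
        triples.append([u, pos_item, neg_item])
    return np.array(triples)

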
def BPR_train_original(dataset,
                       Recmodel,
                       bpr: utils.BPRLoss,
                       epoch,
                       w=None,
                       val=-1):
    if not args.simutaneously:  # flag name as spelled in the project's config
        # Train alternately: with val <= 0 the embeddings train and the
        # mapping layers are frozen; otherwise the roles are swapped.
        Recmodel.train()
        for m in Recmodel.modules():
            if isinstance(m, nn.Embedding):
                m.requires_grad_(val <= 0)
            elif hasattr(m, 'weight'):
                m.requires_grad_(val > 0)
        print('mapping fixed' if val <= 0 else 'embedding fixed')
    # for m in Recmodel.modules():  # test separate training
    #     if hasattr(m, 'weight'):
    #         if hasattr(m, 'bias') and m.bias is not None:
    #             print(m, 'weight:', m.weight.requires_grad, ' bias:', m.bias.requires_grad)
    #         else:
    #             print(m, 'weight', m.weight.requires_grad)

    S, sam_time = utils.UniformSample_original(dataset, val)  # bpr sample
    print(f"sample time:{sam_time[0]:.1f}={sam_time[1]:.2f}+{sam_time[2]:.2f}")
    users = torch.Tensor(S[:, 0]).long().to(args.device)
    posItems = torch.Tensor(S[:, 1]).long().to(args.device)
    negItems = torch.Tensor(S[:, 2]).long().to(args.device)
    users, posItems, negItems = utils.shuffle(users, posItems, negItems)
    total_batch = (len(users) + args.bpr_batch_size - 1) // args.bpr_batch_size  # ceil division
    aver_loss = 0.
    for batch_i, (batch_users, batch_pos, batch_neg) in enumerate(
            utils.minibatch(users, posItems, negItems,
                            batch_size=args.bpr_batch_size)):
        # train on different graph
        cri = bpr.stageOne(dataset.graph[val + 1], batch_users, batch_pos,
                           batch_neg)
        aver_loss += cri
        if args.tensorboard:
            w.add_scalar('BPRLoss/BPR', cri,
                         epoch * int(len(users) / args.bpr_batch_size) + batch_i)
    aver_loss = aver_loss / total_batch
    return aver_loss
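
The training loops in these examples batch the sampled triples with `utils.shuffle` and `utils.minibatch`. A minimal sketch of what such helpers are assumed to do (illustrative, not the project's exact implementation):

import torch


def shuffle(*tensors):
    # Apply one shared random permutation so the triples stay aligned.
    perm = torch.randperm(len(tensors[0]))
    return tuple(t[perm] for t in tensors)


def minibatch(*tensors, batch_size=2048):
    # Yield aligned batch_size-row slices of every tensor.
    for i in range(0, len(tensors[0]), batch_size):
        yield tuple(t[i:i + batch_size] for t in tensors)
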
Example #4
import torch

import utils
import world  # project-local modules: config, device, tensorboard flag
from utils import timer  # accumulating timer helper used below


def BPR_train_original(dataset,
                       recommend_model,
                       loss_class,
                       epoch,
                       neg_k=1,
                       w=None):
    Recmodel = recommend_model
    Recmodel.train()
    bpr: utils.BPRLoss = loss_class

    with timer(name="Sample"):
        S = utils.UniformSample_original(dataset)
    users = torch.Tensor(S[:, 0]).long()
    posItems = torch.Tensor(S[:, 1]).long()
    negItems = torch.Tensor(S[:, 2]).long()

    users = users.to(world.device)
    posItems = posItems.to(world.device)
    negItems = negItems.to(world.device)
    users, posItems, negItems = utils.shuffle(users, posItems, negItems)
    total_batch = (len(users) + world.config['bpr_batch_size'] - 1) // world.config['bpr_batch_size']  # ceil division
    aver_loss = 0.
    for (batch_i, (batch_users, batch_pos, batch_neg)) in enumerate(
            utils.minibatch(users,
                            posItems,
                            negItems,
                            batch_size=world.config['bpr_batch_size'])):
        cri = bpr.stageOne(batch_users, batch_pos, batch_neg)
        aver_loss += cri
        if world.tensorboard:
            w.add_scalar('BPRLoss/BPR', cri,
                         epoch * int(len(users) / world.config['bpr_batch_size']) +
                         batch_i)
    aver_loss = aver_loss / total_batch
    time_info = timer.dict()  # formatted timings recorded above (the "Sample" stage)
    timer.zero()  # reset the timer's accumulators for the next epoch
    return f"loss{aver_loss:.3f}-{time_info}"