Example no. 1
import torch
import torch.optim as optim
import torch.utils.data as data

# BPR, BPR_DE, implicit_CF_dataset, implicit_CF_dataset_test, read_LOO_settings,
# LOO_DE_run and the parsed argument object `opt` are assumed to be provided by
# the surrounding project.


def run():

    # gpu setting
    gpu = torch.device('cuda:' + str(opt.gpu))

    # training hyperparameters
    lr, batch_size, num_ns = opt.lr, opt.batch_size, opt.num_ns
    reg = opt.reg

    # dataset
    data_path, dataset, LOO_seed = opt.data_path, opt.dataset, opt.LOO_seed
    user_count, item_count, train_mat, train_interactions, valid_sample, test_sample, candidates = read_LOO_settings(
        data_path, dataset, LOO_seed)

    # training interactions with num_ns negative samples per positive pair, plus
    # the held-out validation/test items and candidate sets for leave-one-out evaluation
    train_dataset = implicit_CF_dataset(user_count, item_count, train_mat,
                                        train_interactions, num_ns)
    test_dataset = implicit_CF_dataset_test(user_count, test_sample,
                                            valid_sample, candidates)

    train_loader = data.DataLoader(train_dataset,
                                   batch_size=batch_size,
                                   shuffle=True)

    # load the pre-trained teacher and keep only its frozen embeddings
    teacher_dims = 200
    teacher_model = BPR(user_count, item_count, teacher_dims, gpu)

    with torch.no_grad():
        teacher_model_path = opt.teacher_path + dataset + "/" + 'bpr_0.001_200_0.001.model_0'
        teacher_model = teacher_model.to(gpu)
        teacher_model.load_state_dict(torch.load(teacher_model_path))
        teacher_user_emb, teacher_item_emb = teacher_model.get_embedding()
        del teacher_model  # only the frozen embeddings are needed from here on

    # student model: its embedding dimension is a fraction (opt.percent) of the teacher's
    dim = int(teacher_dims * opt.percent)
    model = BPR_DE(user_count,
                   item_count,
                   teacher_user_emb,
                   teacher_item_emb,
                   gpu=gpu,
                   student_dim=dim,
                   num_experts=opt.num_expert)

    # optimizer
    model = model.to(gpu)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=reg)

    # start training
    LOO_DE_run(opt,
               model,
               gpu,
               optimizer,
               train_loader,
               test_dataset,
               model_save_path=None)
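
All three examples read their configuration from a module-level `opt` object that is never defined in the snippets. Below is a minimal sketch of how such an object could be built with argparse for Example no. 1; the option names mirror the opt.* attributes the code accesses, but every default value is only an illustrative placeholder, not a setting from the original experiments.

import argparse

# hypothetical parser: field names follow the opt.* attributes used above,
# default values are placeholders only
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--batch_size', type=int, default=1024)
parser.add_argument('--num_ns', type=int, default=1)
parser.add_argument('--reg', type=float, default=0.001)
parser.add_argument('--data_path', type=str, default='./Data/')
parser.add_argument('--dataset', type=str, default='citeULike')
parser.add_argument('--LOO_seed', type=int, default=0)
parser.add_argument('--teacher_path', type=str, default='./Saved_models/')
parser.add_argument('--percent', type=float, default=0.1)
parser.add_argument('--num_expert', type=int, default=5)
opt = parser.parse_args()

if __name__ == '__main__':
    run()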
Example no. 2
# (imports as in Example no. 1; load_pickle, implicit_CF_dataset_URRD and
# LOO_URRD_run are likewise assumed to come from the surrounding project)


def run():

    # gpu setting
    gpu = torch.device('cuda:' + str(opt.gpu))

    # dataset setting
    data_path, dataset, LOO_seed = opt.data_path, opt.dataset, opt.LOO_seed

    # training hyperparameters
    lr, batch_size, num_ns = opt.lr, opt.batch_size, opt.num_ns
    reg = opt.reg

    # for URRD: per-user top-K ranking lists produced by the teacher
    # (note that the pickle path is hard-coded to the citeULike teacher)
    user_topk_dict = load_pickle('for_KD/',
                                 'citeULike.bpr.teacher_topk_dict_0')
    user_count, item_count, train_mat, train_interactions, valid_sample, test_sample, candidates = read_LOO_settings(
        data_path, dataset, LOO_seed)
    print("User::", user_count, "Item::", item_count, "Interactions::",
          len(train_interactions))

    train_dataset = implicit_CF_dataset_URRD(user_count, item_count, train_mat,
                                             train_interactions, num_ns, gpu,
                                             user_topk_dict, opt.U_T, opt.U_K,
                                             opt.U_L)
    test_dataset = implicit_CF_dataset_test(user_count, test_sample,
                                            valid_sample, candidates)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=batch_size,
                                   shuffle=True)

    # student model: a plain BPR whose embedding dimension is a fraction
    # (opt.percent) of the 200-dimensional teacher's
    teacher_dims = 200
    dim = int(teacher_dims * opt.percent)
    model = BPR(user_count, item_count, dim, gpu)

    # optimizer
    model = model.to(gpu)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=reg)

    # start training
    LOO_URRD_run(opt,
                 model,
                 gpu,
                 optimizer,
                 train_loader,
                 test_dataset,
                 model_save_path=None)
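
Example no. 2 depends on a load_pickle(path, name) helper whose implementation is not shown. A minimal stand-in with the same call shape is sketched below, under the assumption that it simply concatenates the directory and file name and unpickles the file; the project's actual helper may behave differently.

import pickle


def load_pickle(path, name):
    # minimal stand-in: open '<path><name>' and return the unpickled object
    with open(path + name, 'rb') as f:
        return pickle.load(f)


# e.g. the per-user top-K ranking lists produced by the teacher:
# user_topk_dict = load_pickle('for_KD/', 'citeULike.bpr.teacher_topk_dict_0')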
Example no. 3
# (imports as in Example no. 1; LOO_run is likewise assumed to come from the
# surrounding project)


def run():

    # gpu setting
    gpu = torch.device('cuda:' + str(opt.gpu))

    # training hyperparameters
    lr, batch_size, num_ns = opt.lr, opt.batch_size, opt.num_ns
    reg = opt.reg
    save = opt.save

    # dataset
    data_path, dataset, LOO_seed = opt.data_path, opt.dataset, opt.LOO_seed
    user_count, item_count, train_mat, train_interactions, valid_sample, test_sample, candidates = read_LOO_settings(
        data_path, dataset, LOO_seed)

    train_dataset = implicit_CF_dataset(user_count, item_count, train_mat,
                                        train_interactions, num_ns)
    test_dataset = implicit_CF_dataset_test(user_count, test_sample,
                                            valid_sample, candidates)

    train_loader = data.DataLoader(train_dataset,
                                   batch_size=batch_size,
                                   shuffle=True)

    # plain BPR model trained from scratch (no distillation in this example)
    dim = opt.dim
    model = BPR(user_count, item_count, dim, gpu)

    # optimizer
    model = model.to(gpu)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=reg)

    print("User::", user_count, "Item::", item_count, "Interactions::",
          len(train_interactions))

    # path for saving the trained model; note that 'None' here is a string
    # sentinel, unlike the None object passed in the other examples
    model_save_path = 'None'
    if save == 1:
        model_save_path = ('./Saved_models/' + opt.dataset + "/" + str(opt.model)
                           + "_" + str(opt.lr) + "_" + str(opt.dim) + "_"
                           + str(opt.reg) + '.model' + "_" + str(opt.LOO_seed))

    # start training
    LOO_run(opt, model, gpu, optimizer, train_loader, test_dataset,
            model_save_path)
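
Note that the checkpoint name built here, <model>_<lr>_<dim>_<reg>.model_<LOO_seed>, matches the teacher file loaded in Example no. 1 (bpr_0.001_200_0.001.model_0), so this script appears to be how the 200-dimensional BPR teacher is produced. As a rough sketch, such a checkpoint could be loaded back for inspection as follows, assuming BPR, user_count and item_count are already in scope as in the examples above; the path and the hyperparameters encoded in the file name are taken from Example no. 1.

import torch

# rebuild the 200-d BPR teacher and restore the weights saved by this script
gpu = torch.device('cuda:0')
teacher = BPR(user_count, item_count, 200, gpu).to(gpu)
state = torch.load('./Saved_models/citeULike/bpr_0.001_200_0.001.model_0',
                   map_location=gpu)
teacher.load_state_dict(state)

with torch.no_grad():
    teacher_user_emb, teacher_item_emb = teacher.get_embedding()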