Example #1
### Build knowledge ###
# Context assumed from the surrounding project: torch plus the local modules
# utils, data_utils, model and check_point are already imported, and
# train_instances, validate_instances, test_instances, args, device,
# model_data_type, load_param, ckpt_path and real_adj_matrix are defined earlier.

print("@Build knowledge")
MAX_SEQ_LENGTH, item_dict, reversed_item_dict, item_probs = utils.build_knowledge(train_instances, validate_instances)

print("#Statistic")
NB_ITEMS = len(item_dict)
print(" + Maximum sequence length: ", MAX_SEQ_LENGTH)
print(" + Total items: ", NB_ITEMS)
print('Density of correlation matrix: %.6f' % (real_adj_matrix.nnz * 1.0 / NB_ITEMS / NB_ITEMS))
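# Illustrative sanity check of the density formula (nnz / rows / cols) on a
# toy sparse matrix; not part of the original example:
#   import scipy.sparse as sp
#   toy = sp.csr_matrix(([1.0, 1.0], ([0, 1], [1, 2])), shape=(10, 10))
#   toy.nnz / (10 * 10)  # -> 0.02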

batch_size = args.batch_size
# train_loader = data_utils.generate_data_loader(train_instances, load_param['batch_size'], item_dict, MAX_SEQ_LENGTH, is_bseq=True, is_shuffle=True)
# valid_loader = data_utils.generate_data_loader(validate_instances, load_param['batch_size'], item_dict, MAX_SEQ_LENGTH, is_bseq=True, is_shuffle=False)
test_loader = data_utils.generate_data_loader(test_instances, batch_size, item_dict, MAX_SEQ_LENGTH, is_bseq=True, is_shuffle=False)

pre_trained_model = model.RecSysModel(load_param, MAX_SEQ_LENGTH, item_probs, real_adj_matrix.todense(), device, model_data_type)
pre_trained_model.to(device, dtype=model_data_type)
optimizer = torch.optim.RMSprop(pre_trained_model.parameters(), lr=0.001)

load_model, _, _, _, _, _, _, _, _ = check_point.load_ckpt(ckpt_path, pre_trained_model, optimizer)

def HLU_score_for_data(model, data_loader, batch_size):
    device = model.device
    # Number of batches, rounding up when the dataset size is not an
    # exact multiple of batch_size.
    total_batch = (len(data_loader.dataset) + batch_size - 1) // batch_size
    print(total_batch)
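Example #1 is cut off right after the batch count. For context, here is a minimal sketch of how a half-life utility (HLU) score is commonly computed in next-basket recommendation, in the style of Breese et al.: a hit at rank r contributes 2^(-(r - 1) / (half_life - 1)), and the sum is normalized by the best achievable utility. The function name, the tensor layout (`scores` and `targets` as (B, N) tensors), and the `half_life` default are assumptions for illustration, not this repository's actual implementation.

import torch

def hlu_for_batch(scores, targets, half_life=5):
    # scores:  (B, N) predicted item scores (hypothetical layout, see above)
    # targets: (B, N) multi-hot ground-truth baskets
    # 1-based rank of each item under a descending sort of its score.
    ranks = scores.argsort(dim=1, descending=True).argsort(dim=1) + 1
    # Half-life decay: rank r contributes 2^(-(r - 1) / (half_life - 1)).
    weights = torch.pow(2.0, -(ranks - 1).float() / (half_life - 1))
    utility = (weights * targets.float()).sum(dim=1)
    # Best achievable utility: all k true items occupy the top k ranks.
    max_utility = torch.stack([
        torch.pow(2.0, -torch.arange(k).float() / (half_life - 1)).sum()
        for k in targets.sum(dim=1).long().tolist()
    ])
    return (utility / max_utility.clamp(min=1e-8)).mean()

Averaging the per-user utilities, rather than summing them, keeps the score comparable across batches of different sizes.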
Example #2
NB_ITEMS = len(item_dict)
print(" + Maximum sequence length: ", MAX_SEQ_LENGTH)
print(" + Total items: ", NB_ITEMS)

# print('---------------------Load correlation matrix-------------------')
#
# if os.path.isfile(data_dir + 'adj_matrix/r_matrix_' + str(nb_hop) + 'w.npz'):
#     real_adj_matrix = sp.load_npz(data_dir + 'adj_matrix/r_matrix_' + str(nb_hop) + 'w.npz')
# else:
#     real_adj_matrix = sp.csr_matrix((NB_ITEMS, NB_ITEMS), dtype="float32")
# print('Density of correlation matrix: %.6f' % (real_adj_matrix.nnz * 1.0 / NB_ITEMS / NB_ITEMS))

print('---------------------Create data loader--------------------')
train_loader = data_utils.generate_data_loader(train_instances,
                                               config_param['batch_size'],
                                               item_dict,
                                               MAX_SEQ_LENGTH,
                                               is_bseq=True,
                                               is_shuffle=True)
valid_loader = data_utils.generate_data_loader(validate_instances,
                                               config_param['batch_size'],
                                               item_dict,
                                               MAX_SEQ_LENGTH,
                                               is_bseq=True,
                                               is_shuffle=False)
test_loader = data_utils.generate_data_loader(test_instances,
                                              config_param['batch_size'],
                                              item_dict,
                                              MAX_SEQ_LENGTH,
                                              is_bseq=True,
                                              is_shuffle=False)
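With the three loaders in place, an evaluation loop simply walks them batch by batch. A minimal sketch follows, assuming only the standard PyTorch DataLoader interface; the exact batch layout produced by data_utils.generate_data_loader is not visible in these examples:

# Ceil-division batch count, mirroring the arithmetic in Example #1.
total_batch = (len(test_loader.dataset) + config_param['batch_size'] - 1) \
              // config_param['batch_size']

for batch in test_loader:
    # The structure of `batch` depends on generate_data_loader (sequences,
    # lengths, target baskets, ...); move its tensors to the model's device
    # before scoring.
    pass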