Example #1
0
# model = GGNN(hidden_size=opt.hiddenSize, out_size=opt.hiddenSize, batch_size=opt.batchSize, n_node=n_node,
#                  lr=opt.lr, l2=opt.l2,  step=opt.step, decay=opt.lr_dc_step * len(train_data.inputs) / opt.batchSize, lr_dc=opt.lr_dc,
#                  nonhybrid=opt.nonhybrid)
# print(opt)
# =============================================================================
#%%
# Best (hit@20, MRR@20) observed so far, and the epochs they occurred in.
best_result = [0, 0]
best_epoch = [0, 0]
# NOTE(review): epoch count is hard-coded to 5 here; other fragments use opt.epoch.
for epoch in range(5):
    print('epoch: ', epoch, '===========================================')
    # ---- training pass ----
    slices = train_data.generate_batch(model.batch_size)  # list of numpy array
    fetches = [model.opt, model.loss_train, model.global_step]
    print('start training: ', datetime.datetime.now())
    loss_ = []
    for i, j in zip(slices, np.arange(len(slices))):
        # get_slice unpacks one batch into session-graph tensors: in/out
        # adjacency, alias indices, item ids, mask, targets, plus extras
        # (imps, alias_t) whose semantics are defined in the Data class.
        adj_in, adj_out, alias, item, mask, targets, imps, alias_t = train_data.get_slice(
            i)
        _, loss, _ = model.run(fetches, targets, item, adj_in, adj_out, alias,
                               mask, imps, alias_t)
        loss_.append(loss)
    loss = np.mean(loss_)  # mean training loss for this epoch
    # ---- evaluation pass over the test set ----
    slices = test_data.generate_batch(model.batch_size)
    print('start predicting: ', datetime.datetime.now())
    hit, mrr, test_loss_ = [], [], []
    for i, j in zip(slices, np.arange(len(slices))):
        adj_in, adj_out, alias, item, mask, targets, imps, alias_t = test_data.get_slice(
            i)
        scores, test_loss = model.run([model.score_test, model.loss_test],
                                      targets, item, adj_in, adj_out, alias,
                                      mask, imps, alias_t)
        test_loss_.append(test_loss)
        # Top-20 predicted item indices per session, highest score first.
        # (Fragment is truncated here; hit/mrr accumulation continues below.)
        index = np.argsort(-(scores), 1)[:, :20]
                    # (continuation of a parser.add_argument call cut off above)
                    default=10,
                    help='the number of epoch to wait before early stop ')
# Use only the global session preference for prediction (skip the hybrid score).
parser.add_argument('--nonhybrid',
                    action='store_true',
                    help='only use the global preference to predict')
parser.add_argument('--validation', action='store_true', help='validation')
# Fraction of the training set carved off as a validation split.
parser.add_argument('--valid_portion',
                    type=float,
                    default=0.1,
                    help='split the portion of training set as validation set')
opt = parser.parse_args()
print(opt)

#test = SessionGraph(opt, 310)
#print(test)  # print the structure of the graph neural network

# Test: display the session graph
#model = trans_to_cuda(SessionGraph(opt, 310))  # this single call builds the model

# Test: read the graph nodes of the training data
#all_train_seq = pickle.load(open('../datasets/' + opt.dataset + '/all_train_seq.txt', 'rb'))
#g = build_graph(all_train_seq)
#print(len(g.node))

# Test
# Smoke test: load the pickled training sessions and slice one batch.
# FIX: the original passed an open() handle straight into pickle.load and
# never closed it; use a context manager so the file is closed promptly.
with open('../datasets/' + opt.dataset + '/train.txt', 'rb') as f:
    train_data = pickle.load(f)
train_data_compare = Data(train_data, shuffle=True)
slices = train_data_compare.generate_batch(100)
slicesData = train_data_compare.get_slice(slices[1])
Example #3
0
File: main.py — Project: judiebig/LGSR
                e_loss.append(s_loss)  # accumulate per-step auxiliary (cite) loss
            cost_time = time.time() - train_start
            m_print('Step: %d, Train cide_Loss: %.4f, Cost: %.2f' %
                    (cide_step, np.mean(e_loss), cost_time))
            # Early-stop the auxiliary training once the mean loss plateaus
            # (change smaller than 5e-4 between consecutive steps).
            if abs(pre_cite_loss - np.mean(e_loss)) <= 0.0005:
                early_stop = True
            pre_cite_loss = np.mean(e_loss)

    # pdb.set_trace()
    # ---- recommendation-model training pass for this epoch ----
    slices = train_data.generate_batch(opt.batch_size)
    fetches = [model.rec_opt, model.rec_loss, model.global_step]
    m_print('start train: ' +
            time.strftime('%m-%d %H:%M:%S ', time.localtime(time.time())))
    loss_ = []
    for i, j in zip(slices, np.arange(len(slices))):
        batch_input = train_data.get_slice(i)
        _, loss, _ = model.run_rec(fetches, batch_input)
        loss_.append(loss)
    loss = np.mean(loss_)  # mean recommendation loss for the epoch

    # ---- evaluation pass: also fetch top-k predictions and attention weights ----
    slices = test_data.generate_batch(opt.batch_size)
    m_print('start predict:' +
            time.strftime('%m-%d %H:%M:%S ', time.localtime(time.time())))
    hit, mrr, test_loss_, sa_wei, a_wei, ans = [], [], [], [], [], []
    for i, j in zip(slices, np.arange(len(slices))):
        batch_input = test_data.get_slice(i)
        # is_train=False switches the model to inference mode; tk/satt/att are
        # top-k items and (self-)attention weights — semantics defined in the model.
        scores, test_loss, tk, satt, att = model.run_rec(
            [model.logits, model.rec_loss, model.top_k, model.satt, model.att],
            batch_input,
            is_train=False)
        test_loss_.append(test_loss)
Example #4
0
              # (continuation of the model constructor call cut off above)
              lr_dc=opt.lr_dc,
              nonhybrid=opt.nonhybrid)
print(opt)
# Best (hit@20, MRR@20) so far and the epochs they were achieved in.
best_result = [0, 0]
best_epoch = [0, 0]
p_20 = []    # presumably per-epoch hit@20 history — confirm against later code
mrr_20 = []  # presumably per-epoch MRR@20 history — confirm against later code
for epoch in range(opt.epoch):
    print('epoch: ', epoch, '===========================================')
    # ---- training pass ----
    slices = train_data.generate_batch(model.batch_size)
    fetches = [model.opt, model.loss_train, model.global_step]
    print('start training: ', datetime.datetime.now())
    loss_ = []
    for i, j in tqdm(zip(slices, np.arange(len(slices)))):
        adj_in, adj_out, alias, item, mask, targets, mask_r, mask_e = train_data.get_slice(
            i, n_node - 1)
        _, loss, _ = model.run(fetches, targets, item, adj_in, adj_out, alias,
                               mask, mask_r, mask_e)
        loss_.append(loss)
    loss = np.mean(loss_)  # mean training loss for the epoch
    # ---- evaluation pass ----
    slices = test_data.generate_batch(model.batch_size)
    print('start predicting: ', datetime.datetime.now())
    hit, mrr, test_loss_ = [], [], []
    # (removed an unused `z = zip(...)` left over here in the original)
    for i, j in tqdm(zip(slices, np.arange(len(slices)))):
        # BUG FIX: these batch indices come from test_data.generate_batch, so
        # the batch must be fetched from test_data as well. The original called
        # train_data.get_slice here, indexing the training set with test-set
        # batch indices, which evaluates the model on the wrong data.
        adj_in, adj_out, alias, item, mask, targets, mask_r, mask_e = test_data.get_slice(
            i, n_node - 1)
        scores, test_loss = model.run([model.score_test, model.loss_test],
                                      targets, item, adj_in, adj_out, alias,
                                      mask, mask_r, mask_e)
        test_loss_.append(test_loss)