Example #1
                        embedding_len=embedding_len,
                        i_node=i_node)
print(opt)
#%%
# =============================================================================
# model = GGNN(hidden_size=opt.hiddenSize, out_size=opt.hiddenSize, batch_size=opt.batchSize, n_node=n_node,
#                  lr=opt.lr, l2=opt.l2,  step=opt.step, decay=opt.lr_dc_step * len(train_data.inputs) / opt.batchSize, lr_dc=opt.lr_dc,
#                  nonhybrid=opt.nonhybrid)
# print(opt)
# =============================================================================
#%%
best_result = [0, 0]
best_epoch = [0, 0]
for epoch in range(5):
    print('epoch: ', epoch, '===========================================')
    slices = train_data.generate_batch(model.batch_size)  # list of numpy array
    fetches = [model.opt, model.loss_train, model.global_step]
    print('start training: ', datetime.datetime.now())
    loss_ = []
    for i, j in zip(slices, np.arange(len(slices))):
        adj_in, adj_out, alias, item, mask, targets, imps, alias_t = train_data.get_slice(
            i)
        _, loss, _ = model.run(fetches, targets, item, adj_in, adj_out, alias,
                               mask, imps, alias_t)
        loss_.append(loss)
    loss = np.mean(loss_)
    slices = test_data.generate_batch(model.batch_size)
    print('start predicting: ', datetime.datetime.now())
    hit, mrr, test_loss_ = [], [], []
    for i, j in zip(slices, np.arange(len(slices))):
        adj_in, adj_out, alias, item, mask, targets, imps, alias_t = test_data.get_slice(
            i)
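
# What follows in these scripts (truncated above) is hit@20 / MRR@20
# bookkeeping. A self-contained sketch of that metric update, modeled on the
# SR-GNN evaluation loop; `scores` stands for the per-item prediction matrix
# fetched from the model, whose exact fetch name varies between these projects:
import numpy as np

def update_metrics(scores, targets, hit, mrr, k=20):
    top_k = np.argsort(scores, 1)[:, -k:]        # indices of the k best-scored items
    for score, target in zip(top_k, targets):
        hit.append(np.isin(target - 1, score))   # item ids are 1-indexed
        rank = np.where(score == target - 1)[0]
        if len(rank) == 0:
            mrr.append(0)                        # target missed the top-k
        else:
            mrr.append(1 / (k - rank[0]))        # reciprocal rank within top-k
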
Example #2
File: main.py Project: judiebig/LGSR
                model.cide_loss, model.cide_opt, model.global_step_cide
            ]
            train_start = time.time()
            for _, x, y in cide_batch:
                # pdb.set_trace()
                s_loss, _, _ = model.run_cide(fetches_node, x, y)
                e_loss.append(s_loss)
            cost_time = time.time() - train_start
            m_print('Step: %d, Train cide_Loss: %.4f, Cost: %.2f' %
                    (cide_step, np.mean(e_loss), cost_time))
            if abs(pre_cite_loss - np.mean(e_loss)) <= 0.0005:
                early_stop = True
            pre_cite_loss = np.mean(e_loss)

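    # The early-stop test above halts CIDE pre-training once the epoch-mean
    # loss plateaus. The same delta rule as a standalone helper (threshold
    # taken from the snippet; the helper name is illustrative):
    #
    #     def loss_plateaued(prev_loss, cur_loss, tol=5e-4):
    #         return abs(prev_loss - cur_loss) <= tol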
    # pdb.set_trace()
    slices = train_data.generate_batch(opt.batch_size)
    fetches = [model.rec_opt, model.rec_loss, model.global_step]
    m_print('start train: ' +
            time.strftime('%m-%d %H:%M:%S ', time.localtime(time.time())))
    loss_ = []
    for i, j in zip(slices, np.arange(len(slices))):
        batch_input = train_data.get_slice(i)
        _, loss, _ = model.run_rec(fetches, batch_input)
        loss_.append(loss)
    loss = np.mean(loss_)

    slices = test_data.generate_batch(opt.batch_size)
    m_print('start predict: ' +
            time.strftime('%m-%d %H:%M:%S ', time.localtime(time.time())))
    hit, mrr, test_loss_, sa_wei, a_wei, ans = [], [], [], [], [], []
    for i, j in zip(slices, np.arange(len(slices))):
Example #3
             lr=opt.lr,
             l2=opt.l2,
             step=opt.step,
             decay=opt.lr_dc_step * len(train_data.inputs) / opt.batchSize,
             lr_dc=opt.lr_dc,
             nonhybrid=opt.nonhybrid,
             model_dir=opt.model_dir,
             model_name=opt.model_name)
print(opt)
best_result = [0, 0]
best_epoch = [0, 0]
test_scores_ = []
test_item_ = []
for epoch in range(opt.epoch):
    print('epoch: ', epoch, '===========================================')
    slices = train_data.generate_batch(model.batch_size)  # list[ndarray]
    fetches = [model.opt, model.loss_train, model.global_step]
    print('start training: ', datetime.datetime.now())
    loss_ = []
    for i, j in zip(slices, np.arange(len(slices))):
        adj_in, adj_out, alias, item, mask, targets = train_data.get_slice(i)
        #print(adj_in, adj_out, alias, item, mask, targets)
        _, loss, _ = model.run(fetches, targets, item, adj_in, adj_out, alias,
                               mask)
        loss_.append(loss)
    loss = np.mean(loss_)
    slices = test_data.generate_batch(model.batch_size)
    model.save()
    print('start predicting: ', datetime.datetime.now())
    hit, mrr, test_loss_, test_scores_ = [], [], [], []
    test_item_ = []
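
# The customary end-of-epoch bookkeeping these scripts share: compare the
# epoch's mean hit@20 / MRR@20 (filled by the truncated loop above) against
# the best values seen so far. A sketch using the snippet's own names:
#
#     if np.mean(hit) * 100 >= best_result[0]:
#         best_result[0], best_epoch[0] = np.mean(hit) * 100, epoch
#     if np.mean(mrr) * 100 >= best_result[1]:
#         best_result[1], best_epoch[1] = np.mean(mrr) * 100, epoch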
Example #4
             n_node=n_node,
             lr=opt.lr,
             l2=opt.l2,
             step=opt.step,
             decay=opt.lr_dc_step * len(train_data.inputs) / opt.batchSize,
             lr_dc=opt.lr_dc,
             nonhybrid=opt.nonhybrid)

print(opt)

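# `decay` above equals batches-per-epoch (len(train_data.inputs) / opt.batchSize)
# times opt.lr_dc_step, i.e. the number of optimizer steps between learning-rate
# drops. Inside a TF1-style model it conventionally feeds an exponential
# schedule; a sketch under that assumption (TF 1.x, `import tensorflow as tf`):
#
#     learning_rate = tf.train.exponential_decay(
#         opt.lr,                   # initial learning rate
#         global_step=global_step,  # incremented once per optimizer step
#         decay_steps=decay,        # computed above
#         decay_rate=opt.lr_dc,     # multiplicative decay factor
#         staircase=True)           # decay in discrete jumps
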
best_result = [0, 0]
best_epoch = [0, 0]

for epoch in range(opt.epoch):
    print('epoch: ', epoch, '===========================================')
    slices = train_data.generate_batch(model.batch_size)  # one index array per batch
    fetches = [model.opt, model.loss_train, model.global_step]
    print('start training: ', datetime.datetime.now())
    loss_ = []
    for i, j in zip(slices, np.arange(len(slices))):
        adj_in, adj_out, alias, item, mask, targets = train_data.get_slice(i)
        _, loss, _ = model.run(fetches, targets, item, adj_in, adj_out, alias,
                               mask)
        loss_.append(loss)

    loss = np.mean(loss_)
    slices = test_data.generate_batch(model.batch_size)
    print('start predicting: ', datetime.datetime.now())
    hit, mrr, test_loss_ = [], [], []
    for i, j in zip(slices, np.arange(len(slices))):
        adj_in, adj_out, alias, item, mask, targets = test_data.get_slice(i)

parser.add_argument('--patience',
                    type=int,
                    default=10,
                    help='the number of epoch to wait before early stop')
parser.add_argument('--nonhybrid',
                    action='store_true',
                    help='only use the global preference to predict')
parser.add_argument('--validation', action='store_true', help='validation')
parser.add_argument('--valid_portion',
                    type=float,
                    default=0.1,
                    help='split the portion of training set as validation set')
opt = parser.parse_args()
print(opt)

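# The --validation / --valid_portion flags are conventionally consumed by a
# split helper like the following (condensed from the SR-GNN utilities; the
# exact name and home module differ between the projects excerpted here):
import numpy as np

def split_validation(train_set, valid_portion):
    train_x, train_y = train_set
    sidx = np.arange(len(train_x), dtype='int32')
    np.random.shuffle(sidx)                      # random train/valid partition
    n_train = int(np.round(len(train_x) * (1. - valid_portion)))
    valid = ([train_x[s] for s in sidx[n_train:]],
             [train_y[s] for s in sidx[n_train:]])
    train = ([train_x[s] for s in sidx[:n_train]],
             [train_y[s] for s in sidx[:n_train]])
    return train, valid
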
#test = SessionGraph(opt, 310)
#print(test)  # print the structure of the graph neural network

# Test: display the session graph
#model = trans_to_cuda(SessionGraph(opt, 310))  # this single line builds the model

# Test: read the graph nodes of the training data
#all_train_seq = pickle.load(open('../datasets/' + opt.dataset + '/all_train_seq.txt', 'rb'))
#g = build_graph(all_train_seq)
#print(len(g.node))

# Test
train_data = pickle.load(
    open('../datasets/' + opt.dataset + '/train.txt', 'rb'))
train_data_compare = Data(train_data, shuffle=True)
slices = train_data_compare.generate_batch(100)
slicesData = train_data_compare.get_slice(slices[1])
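
# For reference, generate_batch in the SR-GNN family returns one row-index
# array per batch, which get_slice then expands into items, aliases, adjacency
# matrices, masks and targets. A condensed sketch (the real method also
# shuffles inputs/mask/targets first when shuffle=True):
def generate_batch(length, batch_size):
    n_batch = length // batch_size
    if length % batch_size != 0:
        n_batch += 1                             # allow a partial final batch
    slices = np.split(np.arange(n_batch * batch_size), n_batch)
    slices[-1] = slices[-1][:length - batch_size * (n_batch - 1)]
    return slices                                # list[ndarray] of row indices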