Example #1
def load_model(n_items, path):
    # NOTE: this function assumes that `args` (the parsed command-line
    # arguments) and `sess` (an open tf.Session) are available in the
    # enclosing scope.
    model_params = {
        'item_size': n_items,
        'dilated_channels': args.dilated_channels,
        'dilations': [1, 2, 1, 2, 1, 2],
        'kernel_size': args.kernel_size,
        'learning_rate': args.learning_rate,
        'batch_size': args.batch_size,
        'is_negsample': False
    }
    itemrec = generator_recsys.NextItNet_Decoder(model_params)
    itemrec.train_graph(model_params['is_negsample'])
    itemrec.predict_graph(model_params['is_negsample'], reuse=True)
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()
    saver.restore(sess, path)
    return itemrec
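
# A hedged usage sketch for load_model (illustrative only; `sess`, `args` and
# `items` must already exist in the calling scope, as noted above):
#
#   itemrec = load_model(n_items=len(items),
#                        path="Data/Models/generation_model/model_nextitnet.ckpt")
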
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--top_k',
                        type=int,
                        default=5,
                        help='Sample from top k predictions')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.9,
                        help='beta1 hyperparameter for Adam')
    # this is a demo dataset that just lets you run the code; suggested datasets: http://grouplens.org/datasets/
    parser.add_argument(
        '--datapath',
        type=str,
        default='Data/Session/user-filter-20000items-session5.csv',
        help='data path')
    parser.add_argument('--eval_iter',
                        type=int,
                        default=1000,
                        help='Sample generator output every x steps')
    parser.add_argument('--save_para_every',
                        type=int,
                        default=1000,
                        help='save model parameters every x steps')
    parser.add_argument('--tt_percentage',
                        type=float,
                        default=0.2,
                        help='fraction of data used for testing; 0.2 means an 80/20 train/test split')
    parser.add_argument(
        '--is_generatesubsession',
        type=bool,
        default=False,
        help=
        'whether to generate subsessions, e.g., 12345 --> 01234, 00123, 00012; may be useful for very long sequences'
    )
    args = parser.parse_args()

    dl = data_loader_recsys.Data_Loader({
        'model_type': 'generator',
        'dir_name': args.datapath
    })
    all_samples = dl.item
    items = dl.item_dict
    print "len(items)", len(items)

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
    all_samples = all_samples[shuffle_indices]

    # Split train/test set
    dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
    train_set, valid_set = all_samples[:dev_sample_index], all_samples[
        dev_sample_index:]

    if args.is_generatesubsession:
        train_set = generatesubsequence(train_set)

    model_para = {
        # if you change the parameters here, do not forget to change them in nextitrec_generate.py as well
        'item_size': len(items),
        'dilated_channels': 100,  # larger is better, up to around 512 or 1024
        # if you use nextitnet_residual_block, you can use [1, 4, 1, 4, 1, 4],
        # if you use nextitnet_residual_block_one, I suggest trying [1, 2, 4] first
        # when you change it, do not forget to change it in nextitrec_generate.py as well
        'dilations': [1, 2, 1, 2, 1, 2],  # YOU should tune this hyper-parameter, refer to the paper
        'kernel_size': 3,
        'learning_rate': 0.001,
        'batch_size': 128,  # YOU should tune this hyper-parameter, options: 32, 64, 128, 256
        'iterations': 10,  # if your dataset is small, consider adding regularization to prevent overfitting
        'is_negsample': True  # False denotes no negative sampling
    }

    itemrec = generator_recsys.NextItNet_Decoder(model_para)
    itemrec.train_graph(model_para['is_negsample'])
    optimizer = tf.train.AdamOptimizer(model_para['learning_rate'],
                                       beta1=args.beta1).minimize(itemrec.loss)
    itemrec.predict_graph(model_para['is_negsample'], reuse=True)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()

    numIters = 1
    for iter in range(model_para['iterations']):
        batch_no = 0
        batch_size = model_para['batch_size']
        while (batch_no + 1) * batch_size < train_set.shape[0]:

            start = time.clock()

            item_batch = train_set[batch_no * batch_size:(batch_no + 1) *
                                   batch_size, :]
            _, loss, results = sess.run(
                [optimizer, itemrec.loss, itemrec.arg_max_prediction],
                feed_dict={itemrec.itemseq_input: item_batch})
            end = time.clock()
            if numIters % args.eval_iter == 0:
                print "-------------------------------------------------------train1"
                print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
                    loss, iter, batch_no, numIters,
                    train_set.shape[0] / batch_size)
                print "TIME FOR BATCH", end - start
                print "TIME FOR ITER (mins)", (end - start) * (
                    train_set.shape[0] / batch_size) / 60.0

            if numIters % args.eval_iter == 0:
                print "-------------------------------------------------------test1"
                if (batch_no + 1) * batch_size < valid_set.shape[0]:
                    item_batch = valid_set[(batch_no) *
                                           batch_size:(batch_no + 1) *
                                           batch_size, :]
                loss = sess.run([itemrec.loss_test],
                                feed_dict={itemrec.input_predict: item_batch})
                print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
                    loss, iter, batch_no, numIters,
                    valid_set.shape[0] / batch_size)

            batch_no += 1

            if numIters % args.eval_iter == 0:
                batch_no_test = 0
                batch_size_test = batch_size * 1
                curr_preds_5 = []
                rec_preds_5 = []  #1
                ndcg_preds_5 = []  #1
                while (batch_no_test +
                       1) * batch_size_test < valid_set.shape[0]:
                    if (numIters / (args.eval_iter) < 10):
                        if (batch_no_test > 10):
                            break
                    else:
                        if (batch_no_test > 50):
                            break
                    item_batch = valid_set[batch_no_test *
                                           batch_size_test:(batch_no_test +
                                                            1) *
                                           batch_size_test, :]

                    [top_k_batch] = sess.run([itemrec.top_k],
                                             feed_dict={
                                                 itemrec.input_predict:
                                                 item_batch,
                                             })
                    top_k = np.squeeze(top_k_batch[1])
                    for bi in range(top_k.shape[0]):
                        # pred_items_5 = utils.sample_top_k(probs[bi], top_k=args.top_k)#top_k=5
                        # pred_items_20 = utils.sample_top_k(probs[bi], top_k=args.top_k+15)
                        pred_items_5 = top_k[bi][:5]
                        # pred_items_20 = top_k[bi]
                        true_item = item_batch[bi][-1]
                        predictmap_5 = {
                            ch: i
                            for i, ch in enumerate(pred_items_5)
                        }
                        # pred_items_20 = {ch: i for i, ch in enumerate(pred_items_20)}

                        rank_5 = predictmap_5.get(true_item)
                        # rank_20 = pred_items_20.get(true_item)
                        if rank_5 == None:
                            curr_preds_5.append(0.0)
                            rec_preds_5.append(0.0)  # 2
                            ndcg_preds_5.append(0.0)  # 2
                        else:
                            MRR_5 = 1.0 / (rank_5 + 1)
                            Rec_5 = 1.0  # 3
                            ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)  # 3
                            curr_preds_5.append(MRR_5)
                            rec_preds_5.append(Rec_5)  # 4
                            ndcg_preds_5.append(ndcg_5)  # 4

                    batch_no_test += 1
                    if (numIters / (args.eval_iter) < 10):
                        if (batch_no_test == 10):
                            print "mrr_5:", sum(curr_preds_5) / float(
                                len(curr_preds_5)
                            ), "hit_5:", sum(rec_preds_5) / float(
                                len(rec_preds_5)), "ndcg_5:", sum(
                                    ndcg_preds_5) / float(len(ndcg_preds_5))
                    else:
                        if (batch_no_test == 50):
                            print "mrr_5:", sum(curr_preds_5) / float(
                                len(curr_preds_5)
                            ), "hit_5:", sum(rec_preds_5) / float(
                                len(rec_preds_5)), "ndcg_5:", sum(
                                    ndcg_preds_5) / float(len(ndcg_preds_5))
            numIters += 1
            if numIters % args.save_para_every == 0:
                save_path = saver.save(
                    sess, "Data/Models/generation_model/model_nextitnet.ckpt")
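
The evaluation block above interleaves the MRR@5, HIT@5 and NDCG@5 bookkeeping with the batching logic. As a minimal standalone sketch of the same metric arithmetic (the helper name `rank_metrics_at_5` is illustrative, not part of the repository):

import math

def rank_metrics_at_5(top_5_items, true_item):
    """Return (mrr_5, hit_5, ndcg_5) for one test example.

    top_5_items: the five highest-scored item ids, best first.
    true_item:   the ground-truth next item id.
    """
    rank = {item: i for i, item in enumerate(top_5_items)}.get(true_item)
    if rank is None:
        return 0.0, 0.0, 0.0
    return 1.0 / (rank + 1), 1.0, 1.0 / math.log(rank + 2, 2)

# e.g. rank_metrics_at_5([7, 3, 9, 1, 4], 9) -> (0.333..., 1.0, 0.5)
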
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--top_k',
                        type=int,
                        default=5,
                        help='Sample from top k predictions')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.9,
                        help='beta1 hyperparameter for Adam')

    parser.add_argument(
        '--datapath',
        type=str,
        default='Data/Session/user-filter-20000items-session5.csv',
        help='data path')
    parser.add_argument('--eval_iter',
                        type=int,
                        default=10,
                        help='Sample generator output every x steps')
    parser.add_argument('--save_para_every',
                        type=int,
                        default=10,
                        help='save model parameters every x steps')
    parser.add_argument('--tt_percentage',
                        type=float,
                        default=0.5,
                        help='fraction of data used for testing; 0.5 means a 50/50 train/test split')
    parser.add_argument(
        '--is_generatesubsession',
        type=bool,
        default=False,
        help=
        'whether to generate subsessions, e.g., 12345 --> 01234, 00123, 00012; may be useful for very long sequences'
    )
    args = parser.parse_args()

    dl = data_loader_recsys.Data_Loader({
        'model_type': 'generator',
        'dir_name': args.datapath
    })
    all_samples = dl.item
    items = dl.item_dict
    print "len(items)", len(items)

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
    all_samples = all_samples[shuffle_indices]

    # Split train/test set
    dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
    train_set, valid_set = all_samples[:dev_sample_index], all_samples[
        dev_sample_index:]

    model_para = {
        # all parameters should be consistent with those in nextitrec.py!
        'item_size': len(items),
        'dilated_channels': 100,
        'dilations': [1, 2],
        'kernel_size': 3,
        'learning_rate': 0.001,
        'batch_size': 32,
        'iterations': 2,  # unused here, can be removed
        'is_negsample': False  # False denotes no negative sampling
    }

    itemrec = generator_recsys.NextItNet_Decoder(model_para)
    itemrec.train_graph(model_para['is_negsample'])
    itemrec.predict_graph(model_para['is_negsample'], reuse=True)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)

    saver = tf.train.Saver()
    saver.restore(sess, "Data/Models/generation_model/model_nextitnet.ckpt")

    batch_no_test = 0
    batch_size_test = model_para['batch_size']
    curr_preds_5 = []
    rec_preds_5 = []  # 1
    ndcg_preds_5 = []  # 1
    curr_preds_20 = []
    rec_preds_20 = []  # 1
    ndcg_preds_20 = []  # 1
    while (batch_no_test + 1) * batch_size_test < valid_set.shape[0]:
        text_batch = valid_set[batch_no_test *
                               batch_size_test:(batch_no_test + 1) *
                               batch_size_test, :]
        [probs] = sess.run([itemrec.g_probs],
                           feed_dict={itemrec.input_predict: text_batch})
        for bi in range(probs.shape[0]):
            pred_words_5 = utils.sample_top_k(probs[bi][-1],
                                              top_k=args.top_k)  # top_k=5
            pred_words_20 = utils.sample_top_k(probs[bi][-1],
                                               top_k=args.top_k + 15)

            true_word = text_batch[bi][-1]
            predictmap_5 = {ch: i for i, ch in enumerate(pred_words_5)}
            pred_words_20 = {ch: i for i, ch in enumerate(pred_words_20)}

            rank_5 = predictmap_5.get(true_word)
            rank_20 = pred_words_20.get(true_word)
            if rank_5 == None:
                curr_preds_5.append(0.0)
                rec_preds_5.append(0.0)  # 2
                ndcg_preds_5.append(0.0)  # 2
            else:
                MRR_5 = 1.0 / (rank_5 + 1)
                Rec_5 = 1.0  # 3
                ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)  # 3
                curr_preds_5.append(MRR_5)
                rec_preds_5.append(Rec_5)  # 4
                ndcg_preds_5.append(ndcg_5)  # 4
            if rank_20 == None:
                curr_preds_20.append(0.0)
                rec_preds_20.append(0.0)  # 2
                ndcg_preds_20.append(0.0)  # 2
            else:
                MRR_20 = 1.0 / (rank_20 + 1)
                Rec_20 = 1.0  # 3
                ndcg_20 = 1.0 / math.log(rank_20 + 2, 2)  # 3
                curr_preds_20.append(MRR_20)
                rec_preds_20.append(Rec_20)  # 4
                ndcg_preds_20.append(ndcg_20)  # 4

        batch_no_test += 1
        print "BATCH_NO: {}".format(batch_no_test)
        print "Accuracy mrr_5:", sum(curr_preds_5) / float(
            len(curr_preds_5))  # 5
        print "Accuracy mrr_20:", sum(curr_preds_20) / float(
            len(curr_preds_20))  # 5
        print "Accuracy hit_5:", sum(rec_preds_5) / float(
            len(rec_preds_5))  # 5
        print "Accuracy hit_20:", sum(rec_preds_20) / float(
            len(rec_preds_20))  # 5
        print "Accuracy ndcg_5:", sum(ndcg_preds_5) / float(
            len(ndcg_preds_5))  # 5
        print "Accuracy ndcg_20:", sum(ndcg_preds_20) / float(
            len(ndcg_preds_20))  #
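
Example #3 ranks candidates with `utils.sample_top_k`, whose exact behaviour (deterministic top-k versus sampling among the top k) is defined in the repository's utils module. A plausible deterministic stand-in, shown only as a sketch under that assumption:

import numpy as np

def top_k_indices(probs, top_k=5):
    """Indices of the top_k largest entries of `probs`, highest probability first."""
    idx = np.argpartition(probs, -top_k)[-top_k:]  # unordered top_k, found in O(n)
    return idx[np.argsort(-probs[idx])]            # order them by score, best first
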
Example #4
def main(args):
    exps = pd.read_csv('exp.csv')
    cPid = os.getpid()
    train_time = 0
    test_time = 0
    for i, row in exps.iterrows():
        gc.collect()
        args['expname'] = row['name']
        args['sessionid'] = row['sessionid']
        args['itemid'] = row['itemid']
        args['data_folder'] = row['path']
        args['valid_data'] = row['test']
        args['train_data'] = row['train']
        args['freq'] = row['freq']
        args['model_type'] = 'generator'
        print("\n\n############################################\n",
              args['train_data'], ' --- ', args['valid_data'])
        with open("LOGGER_" + args['expname'] + ".txt", "a") as myfile:
            myfile.write(row['train'] + ", " + row['test'] + "\n")

        train_data = os.path.join(args['data_folder'], args['train_data'])
        args['dir_name'] = train_data
        dl = data_loader_recsys.Data_Loader(args)
        train_set = dl.item
        items = dl.item_dict
        print("len(train items)", len(items))

        valid_data = os.path.join(args['data_folder'], args['valid_data'])
        args['dir_name'] = valid_data
        vdl = data_loader_recsys.Data_Loader(args,
                                             testFlag=True,
                                             itemsIDs=dl.itemsIDs,
                                             max_doc=dl.max_document_length,
                                             vocab_proc=dl.vocab_processor)
        valid_set = vdl.item
        items2 = vdl.item_dict
        print("len(valid items)", len(items2))
        model_para = {
            # if you change the parameters here, do not forget to change them in nextitrec_generate.py as well
            'item_size': len(items),
            'dilated_channels': 100,  # larger is better, up to around 512 or 1024
            # if you use nextitnet_residual_block, you can use [1, 4, 1, 4, 1, 4],
            # if you use nextitnet_residual_block_one, I suggest trying [1, 2, 4] first
            # when you change it, do not forget to change it in nextitrec_generate.py as well
            'dilations': [1, 2, 4],  # YOU should tune this hyper-parameter, refer to the paper
            'kernel_size': 3,
            'learning_rate': args['learning_rate'],  # YOU should tune this hyper-parameter
            'batch_size': int(args['batch_size']),  # YOU should tune this hyper-parameter
            'epochs': args['epochs'],  # if your dataset is small, consider adding regularization to prevent overfitting
            'is_negsample': False  # False denotes no negative sampling
        }
        tf.compat.v1.reset_default_graph()
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            itemrec = generator_recsys.NextItNet_Decoder(model_para)
            itemrec.train_graph(model_para['is_negsample'])
            optimizer = tf.compat.v1.train.AdamOptimizer(
                model_para['learning_rate'],
                beta1=args['beta1']).minimize(itemrec.loss)
            itemrec.predict_graph(model_para['is_negsample'], reuse=True)
            sess = tf.Session()
            init = tf.global_variables_initializer()
            sess.run(init)
        for e in range(model_para['epochs']):
            print("\n############################\nEPOCH #:", e)
            batch_no = 0
            batch_size = model_para['batch_size']
            losses = []
            t1 = time.time()
            while (batch_no + 1) * batch_size < train_set.shape[0]:
                batch_no += 1
                item_batch = train_set[(batch_no - 1) * batch_size:(batch_no) *
                                       batch_size, :]
                _, loss, results = sess.run(
                    [optimizer, itemrec.loss, itemrec.arg_max_prediction],
                    feed_dict={itemrec.itemseq_input: item_batch})
                losses.append(loss)
                if batch_no % 100 == 0:
                    print('Finished Batch:', batch_no)

            print('Train Loss:', np.mean(losses), valid_set.shape[0])
            train_time += (time.time() - t1)

            # Report intermediate result
            nni.report_intermediate_result(np.mean(losses))
            logger.debug('train loss %g', np.mean(losses))
            logger.debug('Pipe send intermediate result done.')

            batch_no_test = 0
            batch_size_test = batch_size * 1
            MRR = [[], [], [], [], []]
            Rec = [[], [], [], [], []]
            cov = [[], [], [], [], []]
            pop = [[], [], [], [], []]
            Ks = [1, 3, 5, 10, 20]
            t1 = time.time()
            while (batch_no_test + 1) * batch_size_test < valid_set.shape[0]:
                batch_no_test += 1
                item_batch = valid_set[(batch_no_test - 1) *
                                       batch_size_test:(batch_no_test) *
                                       batch_size_test, :]
                [probs
                 ] = sess.run([itemrec.g_probs],
                              feed_dict={itemrec.input_predict: item_batch})
                for bi in range(probs.shape[0]):
                    true_item = item_batch[bi][-1]
                    if true_item == 1:
                        continue
                    if args['freq'] != 0 and dl.freqs[true_item] > args['freq']:
                        continue
                    for k in range(len(Ks)):
                        pred_items = utils.sample_top_k(probs[bi][-1],
                                                        top_k=Ks[k])
                        predictmap = {ch: i for i, ch in enumerate(pred_items)}
                        print(pred_items, predictmap)
                        for p in pred_items:
                            if p == 1:
                                continue
                            if p not in cov[k]:
                                cov[k].append(p)
                            pop[k].append(dl.freqs[p])
                        rank = predictmap.get(true_item)
                        if rank == None:
                            mrr = 0.0
                            rec = 0.0
                        else:
                            mrr = 1.0 / (rank + 1)
                            rec = 1.0
                        MRR[k].append(mrr)
                        Rec[k].append(rec)
            test_time += (time.time() - t1) / len(Ks)
            Rec[:] = [np.mean(x) for x in Rec]
            MRR[:] = [np.mean(x) for x in MRR]
            cov[:] = [len(x) / len(items) for x in cov]
            maxi = max(dl.freqs.values())
            pop[:] = [np.mean(x) / maxi for x in pop]
            print("MRR@20:", MRR[-1])
            print("Recall@20:", Rec[-1])
            print("Cov@20:", cov[-1])
            print("Pop@20:", pop[-1])

            # Print to the logger
            print("LOGGER_ " + args['expname'])
            print('EPOCH #:' + str(e))
            print(
                str(Rec[0]) + ',' + str(Rec[1]) + ',' + str(Rec[2]) + ',' +
                str(Rec[3]) + ',' + str(Rec[4]) + ',' + str(MRR[0]) + ',' +
                str(MRR[1]) + ',' + str(MRR[2]) + ',' + str(MRR[3]) + ',' +
                str(MRR[4]))
            print("\nCOV:" + str(cov[0]) + ',' + str(cov[1]) + ',' +
                  str(cov[2]) + ',' + str(cov[3]) + ',' + str(cov[4]))
            print("\nPOP:" + str(pop[0]) + ',' + str(pop[1]) + ',' +
                  str(pop[2]) + ',' + str(pop[3]) + ',' + str(pop[4]))
            print("\nTrainTime:" + str(train_time))
            print("\nTestTime:" + str(test_time))
            print("\n############################################\n")

            with open("LOGGER_" + args['expname'] + ".txt", "a") as myfile:
                myfile.write('EPOCH #:' + str(e))
                myfile.write(
                    str(Rec[0]) + ',' + str(Rec[1]) + ',' + str(Rec[2]) + ',' +
                    str(Rec[3]) + ',' + str(Rec[4]) + ',' + str(MRR[0]) + ',' +
                    str(MRR[1]) + ',' + str(MRR[2]) + ',' + str(MRR[3]) + ',' +
                    str(MRR[4]))
                myfile.write("\nCOV:" + str(cov[0]) + ',' + str(cov[1]) + ',' +
                             str(cov[2]) + ',' + str(cov[3]) + ',' +
                             str(cov[4]))
                myfile.write("\nPOP:" + str(pop[0]) + ',' + str(pop[1]) + ',' +
                             str(pop[2]) + ',' + str(pop[3]) + ',' +
                             str(pop[4]))
                myfile.write("\nTrainTime:" + str(train_time))
                myfile.write("\nTestTime:" + str(test_time))
                myfile.write(
                    "\n############################################\n")

    # Report final result
    nni.report_final_result(np.mean(losses))
    logger.debug('Final result %g', np.mean(losses))
    logger.debug('Pipe send intermediate result done.')
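
Besides MRR and Recall, Example #4 also reports catalogue coverage (the share of distinct items that ever appear in a recommendation list) and normalised popularity (the mean training frequency of recommended items divided by the maximum frequency). A compact standalone sketch of those two quantities (function name and signature are mine, and the pad-item filtering done in the loop above is omitted):

import numpy as np

def coverage_and_popularity(recommended_lists, item_freqs, n_items):
    """recommended_lists: one list of recommended item ids per test example.
    item_freqs: dict mapping item id -> training frequency.
    n_items:    size of the item catalogue."""
    flat = [p for preds in recommended_lists for p in preds]
    coverage = len(set(flat)) / float(n_items)
    popularity = np.mean([item_freqs[p] for p in flat]) / float(max(item_freqs.values()))
    return coverage, popularity
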
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--top_k',
        type=int,
        default=5,
        help='Sample from top k predictions, used for generating')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.9,
                        help='beta1 hyperparameter for Adam')
    #history_sequences_20181014_fajie_smalltest.csv    /data/weishi_ai_ceph/fajieyuan/nextitnet-master-2/Data/Session/history_sequences_20181014_fajie.csv
    # /data/weishi_ai_ceph/fajieyuan/nextitnet-master-2/Data/Session/history_sequences_20181014_fajie.index
    # /data/weishi_ai_ceph/fajieyuan/nextitnet-master-2/Data/Models/generation_model/model_nextitnet.pb
    parser.add_argument(
        '--datapath',
        type=str,
        default='Data/Session/user-filter-20000items-session5.csv',
        help='data path')
    parser.add_argument('--datapath_index',
                        type=str,
                        default='Data/Session/',
                        help='data path')
    parser.add_argument('--eval_iter',
                        type=int,
                        default=2000,
                        help='Sample generator output every x steps')
    parser.add_argument('--save_para_every',
                        type=int,
                        default=2000,
                        help='save model parameters every x steps')
    parser.add_argument('--tt_percentage',
                        type=float,
                        default=0.5,
                        help='fraction of data used for testing; 0.5 means a 50/50 train/test split')
    parser.add_argument(
        '--is_generatesubsession',
        type=bool,
        default=False,
        help=
        'whether to generate subsessions, e.g., 12345 --> 01234, 00123, 00012; may be useful for very long sequences'
    )
    args = parser.parse_args()

    dl = data_loader_recsys.Data_Loader({
        'model_type': 'generator',
        'dir_name': args.datapath,
        'dir_name_index': args.datapath_index
    })
    all_samples = dl.item
    items = dl.item_dict
    all_items = items.values()
    print "len(items)", len(items)
    # print all_items

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
    all_samples = all_samples[shuffle_indices]

    # Split train/test set
    dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
    train_set, valid_set = all_samples[:dev_sample_index], all_samples[
        dev_sample_index:]

    if args.is_generatesubsession:
        train_set = generatesubsequence(train_set)

    model_para = {
        # if you change the parameters here, do not forget to change them in nextitrec_generate.py as well
        'item_size': len(items),
        'dilated_channels': 100,  # 200 is usually better
        # if you use nextitnet_residual_block, you can use [1, 4],
        # if you use nextitnet_residual_block_one, I suggest trying [1, 2, 4] first
        # when you change it, do not forget to change it in nextitrec_generate.py as well
        # if removing the residual network does not obviously hurt performance, your data probably
        # does not have strong sequential structure; change the dataset and try again
        'dilations': [1, 4],
        'kernel_size': 3,
        'learning_rate': 0.001,
        'batch_size': 32,  # 128 is usually better
        'iterations': 100,
        'top_k': args.top_k,
        'is_negsample': True  # False denotes no negative sampling; must be True to predict over recalled items
    }

    itemrec = generator_recsys.NextItNet_Decoder(model_para)
    itemrec.train_graph(model_para['is_negsample'])
    optimizer = tf.train.AdamOptimizer(model_para['learning_rate'],
                                       beta1=args.beta1).minimize(itemrec.loss)
    itemrec.predict_graph_onrecall(model_para['is_negsample'], reuse=True)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()

    numIters = 1
    for iter in range(model_para['iterations']):
        batch_no = 0
        batch_size = model_para['batch_size']
        while (batch_no + 1) * batch_size < train_set.shape[0]:

            start = time.clock()

            item_batch = train_set[batch_no * batch_size:(batch_no + 1) *
                                   batch_size, :]
            _, loss, results = sess.run(
                [optimizer, itemrec.loss, itemrec.arg_max_prediction],
                feed_dict={itemrec.itemseq_input: item_batch})
            end = time.clock()
            if numIters % args.eval_iter == 0:
                print "-------------------------------------------------------train1"
                print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
                    loss, iter, batch_no, numIters,
                    train_set.shape[0] / batch_size)
                print "TIME FOR BATCH", end - start
                print "TIME FOR ITER (mins)", (end - start) * (
                    train_set.shape[0] / batch_size) / 60.0

            # if numIters % args.eval_iter == 0:
            #     print "-------------------------------------------------------test1"
            #     if (batch_no + 1) * batch_size < valid_set.shape[0]:
            #         item_batch = valid_set[(batch_no) * batch_size: (batch_no + 1) * batch_size, :]
            #     loss = sess.run(
            #         [itemrec.loss_test],
            #         feed_dict={
            #             itemrec.input_predict: item_batch
            #         })
            #     print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
            #         loss, iter, batch_no, numIters, valid_set.shape[0] / batch_size)

            batch_no += 1

            if numIters % args.eval_iter == 0:
                batch_no_test = 0
                batch_size_test = batch_size * 1
                curr_preds_5 = []
                rec_preds_5 = []  #1
                ndcg_preds_5 = []  #1
                curr_preds_20 = []
                rec_preds_20 = []  # 1
                ndcg_preds_20 = []  # 1

                while (batch_no_test +
                       1) * batch_size_test < valid_set.shape[0]:
                    if (numIters / (args.eval_iter) < 10):
                        if (batch_no_test > 20):
                            break
                    else:
                        if (batch_no_test > 500):
                            break
                    item_batch = valid_set[batch_no_test *
                                           batch_size_test:(batch_no_test +
                                                            1) *
                                           batch_size_test, :]
                    allitem_list = []
                    # allitem_batch=[allitem_list[all_items] for i in xrange(batch_size_test)]
                    for i in range(batch_size_test):
                        allitem_list.append(all_items)

                    [top_k] = sess.run(
                        [itemrec.top_k],
                        feed_dict={
                            itemrec.input_predict: item_batch,
                            itemrec.input_recall:
                            allitem_list  #replace it with your recalled items
                        })
                    batch_top_n = []

                    for bi in range(len(allitem_list)):
                        recall_batch_num = allitem_list[bi]
                        top_n = [recall_batch_num[x] for x in top_k[1][bi]]
                        batch_top_n.append(top_n)

                    for bi in range(top_k[1].shape[0]):
                        top_n = batch_top_n[bi]
                        true_item = item_batch[bi][-1]

                        top_n = {ch: i for i, ch in enumerate(top_n)}
                        rank_n = top_n.get(true_item)
                        if rank_n == None:
                            curr_preds_5.append(0.0)
                            rec_preds_5.append(0.0)  # 2
                            ndcg_preds_5.append(0.0)  # 2
                        else:
                            MRR_5 = 1.0 / (rank_n + 1)
                            Rec_5 = 1.0  # 3
                            ndcg_5 = 1.0 / math.log(rank_n + 2, 2)  # 3
                            curr_preds_5.append(MRR_5)
                            rec_preds_5.append(Rec_5)  # 4
                            ndcg_preds_5.append(ndcg_5)

                    batch_no_test += 1
                    print "BATCH_NO: {}".format(batch_no_test)
                    print "Accuracy mrr_5:", sum(curr_preds_5) / float(
                        len(curr_preds_5))  #5
                    # print "Accuracy mrr_20:", sum(curr_preds_20) / float(len(curr_preds_20))  # 5
                    print "Accuracy hit_5:", sum(rec_preds_5) / float(
                        len(rec_preds_5))  #5
                    # print "Accuracy hit_20:", sum(rec_preds_20) / float(len(rec_preds_20))  # 5
                    print "Accuracy ndcg_5:", sum(ndcg_preds_5) / float(
                        len(ndcg_preds_5))  # 5
                    # print "Accuracy ndcg_20:", sum(ndcg_preds_20) / float(len(ndcg_preds_20))  #
                    #print "curr_preds",curr_preds
                # print "---------------------------Test Accuray----------------------------"
            numIters += 1
            if numIters % args.save_para_every == 0:

                save_path = saver.save(
                    sess,
                    "Data/Models/generation_model/model_nextitrec_20190302_lastindex.ckpt")

                # save_path = saver.save(sess,
                #                        "/data/weishi_ai_ceph/fajieyuan/nextitnet-master-2/Data/Models/generation_model/model_nextitnet.ckpt".format(iter, numIters))

                # print("%d ops in the final graph." % len(tf.get_default_graph().as_graph_def().node))  # 得到当前图有几个操作节点
                # for op in tf.get_default_graph().get_operations():  # 打印模型节点信息
                #     print (op.name, op.values())

                graph_def = tf.get_default_graph().as_graph_def(
                )  # 得到当前的图的 GraphDef 部分,通过这个部分就可以完成重输入层到输出层的计算过程
                output_graph_def = graph_util.convert_variables_to_constants(  # 模型持久化,将变量值固定
                    sess,
                    graph_def,
                    ["input_predict", "top-k"]  # 需要保存节点的名字
                )

                with tf.gfile.GFile(
                        "Data/Models/generation_model/model_nextitrec_20190302_lastindex.pb",
                        "wb") as f:  # 保存模型
                    f.write(output_graph_def.SerializeToString())  # 序列化输出

                print("%d ops in the final graph." %
                      len(output_graph_def.node))
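
Example #5 ends by freezing the graph into a .pb file whose kept nodes are "input_predict" and "top-k". For completeness, a hedged sketch of loading such a frozen graph back for inference with the TF1 API (the tensor names with the ":0" suffix assume those node names were preserved as saved above):

import tensorflow as tf

def load_frozen_graph(pb_path):
    """Load a frozen GraphDef (.pb) and return it as a new tf.Graph."""
    with tf.gfile.GFile(pb_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name="")
    return graph

# graph = load_frozen_graph("Data/Models/generation_model/model_nextitrec_20190302_lastindex.pb")
# sess = tf.Session(graph=graph)
# input_tensor = graph.get_tensor_by_name("input_predict:0")
# top_k_tensor = graph.get_tensor_by_name("top-k:0")
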
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--top_k',
                        type=int,
                        default=10,
                        help='Sample from top k predictions')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.9,
                        help='beta1 hyperparameter for Adam')
    # this is a demo dataset that just lets you run the code; suggested datasets: http://grouplens.org/datasets/
    parser.add_argument('--datapath',
                        type=str,
                        default='Data/Session/',
                        help='data path')
    parser.add_argument('--eval_iter',
                        type=int,
                        default=100,
                        help='Sample generator output every x steps')
    parser.add_argument('--save_para_every',
                        type=int,
                        default=100,
                        help='save model parameters every x steps')
    parser.add_argument('--tt_percentage',
                        type=float,
                        default=0.2,
                        help='fraction of data used for testing; 0.2 means an 80/20 train/test split')
    parser.add_argument("--iterations",
                        type=int,
                        default=2,
                        help='number of training iterations')
    parser.add_argument("--dilated_channels",
                        type=int,
                        default=100,
                        help='number of dilated channels')
    parser.add_argument("--learning_rate",
                        type=float,
                        default=0.008,
                        help='learning rate')
    parser.add_argument("--kernel_size",
                        type=int,
                        default=3,
                        help="kernel size")
    parser.add_argument("--batch_size",
                        type=int,
                        default=300,
                        help="batch size")
    parser.add_argument("--max_seq_size",
                        type=int,
                        default=80,
                        help="max seq len")
    parser.add_argument(
        '--is_generatesubsession',
        type=bool,
        default=False,
        help=
        'whether to generate subsessions, e.g., 12345 --> 01234, 00123, 00012; may be useful for very long sequences'
    )
    args = parser.parse_args()

    training_path = args.datapath + "/" + "training.csv"
    model_path = args.datapath + "/" + "model.ckpt"
    vocab_path = args.datapath + "/" + "vocab.pickle"

    dl = data_loader_recsys.Data_Loader(
        {
            'model_type': 'generator',
            'dir_name': training_path
        },
        max_seq_size=args.max_seq_size)
    all_samples = dl.item
    items = dl.item_dict
    print("len(items)")
    print(len(all_samples))

    with open(vocab_path, 'w') as fp:
        json.dump(items, fp)

    with open(vocab_path + "inverted", 'w') as fp:
        json.dump(dl.vocabulary, fp)

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
    all_samples = all_samples[shuffle_indices]

    # Split train/test set
    dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
    train_set, valid_set = all_samples[:dev_sample_index], all_samples[
        dev_sample_index:]

    if args.is_generatesubsession:
        train_set = generatesubsequence(train_set)

    model_para = {
        # if you change the parameters here, do not forget to change them in nextitrec_generate.py as well
        'item_size': len(items) + 1,
        'dilated_channels': args.dilated_channels,  # larger is better, up to around 512 or 1024
        # if you use nextitnet_residual_block, you can use [1, 4, 1, 4, 1, 4],
        # if you use nextitnet_residual_block_one, I suggest trying [1, 2, 4] first
        # when you change it, do not forget to change it in nextitrec_generate.py as well
        'dilations': [1, 2, 1, 2, 1, 2],  # YOU should tune this hyper-parameter, refer to the paper
        'kernel_size': args.kernel_size,
        'learning_rate': args.learning_rate,  # YOU should tune this hyper-parameter
        'batch_size': args.batch_size,  # YOU should tune this hyper-parameter
        'iterations': args.iterations,  # if your dataset is small, consider adding regularization; probably bump this to 100
        'is_negsample': False  # False denotes no negative sampling
    }

    itemrec = generator_recsys.NextItNet_Decoder(model_para)
    itemrec.train_graph(model_para['is_negsample'])
    optimizer = tf.train.AdamOptimizer(model_para['learning_rate'],
                                       beta1=args.beta1).minimize(itemrec.loss)
    itemrec.predict_graph(model_para['is_negsample'], reuse=True)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()

    numIters = 1
    for iter in range(model_para['iterations']):
        batch_no = 0
        batch_size = model_para['batch_size']
        while (batch_no + 1) * batch_size < train_set.shape[0]:

            start = time.clock()

            item_batch = train_set[batch_no * batch_size:(batch_no + 1) *
                                   batch_size, :]
            _, loss, results = sess.run(
                [optimizer, itemrec.loss, itemrec.arg_max_prediction],
                feed_dict={itemrec.itemseq_input: item_batch})
            end = time.clock()
            if numIters % args.eval_iter == 0:
                print "-------------------------------------------------------train1"
                print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
                    loss, iter, batch_no, numIters,
                    train_set.shape[0] / batch_size)
                print "TIME FOR BATCH", end - start
                print "TIME FOR ITER (mins)", (end - start) * (
                    train_set.shape[0] / batch_size) / 60.0

            if numIters % args.eval_iter == 0:
                print "-------------------------------------------------------test1"
                if (batch_no + 1) * batch_size < valid_set.shape[0]:
                    item_batch = valid_set[(batch_no) *
                                           batch_size:(batch_no + 1) *
                                           batch_size, :]
                loss = sess.run([itemrec.loss_test],
                                feed_dict={itemrec.input_predict: item_batch})
                print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
                    loss, iter, batch_no, numIters,
                    valid_set.shape[0] / batch_size)

            batch_no += 1

            if numIters % args.eval_iter == 0:
                batch_no_test = 0
                batch_size_test = batch_size * 1
                curr_preds_5 = []
                rec_preds_5 = []  #1
                ndcg_preds_5 = []  #1
                curr_preds_20 = []
                rec_preds_20 = []  # 1
                ndcg_preds_20 = []  # 1
                while (batch_no_test +
                       1) * batch_size_test < valid_set.shape[0]:
                    if (numIters / (args.eval_iter) < 10):
                        if (batch_no_test > 20):
                            break
                    else:
                        if (batch_no_test > 500):
                            break
                    item_batch = valid_set[batch_no_test *
                                           batch_size_test:(batch_no_test +
                                                            1) *
                                           batch_size_test, :]
                    [probs] = sess.run(
                        [itemrec.g_probs],
                        feed_dict={itemrec.input_predict: item_batch})
                    for bi in range(probs.shape[0]):
                        pred_items_5 = utils.sample_top_k(
                            probs[bi][-1], top_k=args.top_k)  #top_k=5
                        pred_items_20 = utils.sample_top_k(probs[bi][-1],
                                                           top_k=args.top_k +
                                                           15)

                        true_item = item_batch[bi][-1]
                        predictmap_5 = {
                            ch: i
                            for i, ch in enumerate(pred_items_5)
                        }
                        pred_items_20 = {
                            ch: i
                            for i, ch in enumerate(pred_items_20)
                        }

                        rank_5 = predictmap_5.get(true_item)
                        rank_20 = pred_items_20.get(true_item)
                        if rank_5 == None:
                            curr_preds_5.append(0.0)
                            rec_preds_5.append(0.0)  #2
                            ndcg_preds_5.append(0.0)  #2
                        else:
                            MRR_5 = 1.0 / (rank_5 + 1)
                            Rec_5 = 1.0  #3
                            ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)  # 3
                            curr_preds_5.append(MRR_5)
                            rec_preds_5.append(Rec_5)  #4
                            ndcg_preds_5.append(ndcg_5)  # 4
                        if rank_20 == None:
                            curr_preds_20.append(0.0)
                            rec_preds_20.append(0.0)  #2
                            ndcg_preds_20.append(0.0)  #2
                        else:
                            MRR_20 = 1.0 / (rank_20 + 1)
                            Rec_20 = 1.0  #3
                            ndcg_20 = 1.0 / math.log(rank_20 + 2, 2)  # 3
                            curr_preds_20.append(MRR_20)
                            rec_preds_20.append(Rec_20)  #4
                            ndcg_preds_20.append(ndcg_20)  # 4

                    batch_no_test += 1
                    print "BATCH_NO: {}".format(batch_no_test)
                    print "Accuracy mrr_5:", sum(curr_preds_5) / float(
                        len(curr_preds_5))  #5
                    print "Accuracy mrr_20:", sum(curr_preds_20) / float(
                        len(curr_preds_20))  # 5
                    print "Accuracy hit_5:", sum(rec_preds_5) / float(
                        len(rec_preds_5))  #5
                    print "Accuracy hit_20:", sum(rec_preds_20) / float(
                        len(rec_preds_20))  # 5
                    print "Accuracy ndcg_5:", sum(ndcg_preds_5) / float(
                        len(ndcg_preds_5))  # 5
                    print "Accuracy ndcg_20:", sum(ndcg_preds_20) / float(
                        len(ndcg_preds_20))  #
                    #print "curr_preds",curr_preds
                # print "---------------------------Test Accuray----------------------------"
            numIters += 1
            if numIters % args.save_para_every == 0:
                print("saving..")
                save_path = saver.save(sess, model_path)
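
Several of the examples optionally call `generatesubsequence(train_set)`; its definition is not shown here, but the --is_generatesubsession help text ('12345 --> 01234, 00123, 00012') suggests left-padded prefix sessions. A rough sketch of that behaviour, assuming 0 is the padding id (the real function in the repository may differ):

import numpy as np

def generatesubsequence_sketch(train_set, padtoken=0):
    """Augment each (already padded, fixed-length) session with its left-padded prefixes."""
    augmented = []
    for seq in train_set:
        seq = list(seq)
        augmented.append(seq)
        for cut in range(1, len(seq) - 1):
            augmented.append([padtoken] * cut + seq[:len(seq) - cut])
    return np.array(augmented)
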
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--top_k',
                        type=int,
                        default=5,
                        help='Sample from top k predictions')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.9,
                        help='beta1 hyperparameter for Adam')
    #history_sequences_20181014_fajie_smalltest.csv
    parser.add_argument(
        '--datapath',
        type=str,
        default='Data/Session/user-filter-20000items-session5.csv',
        help='data path')
    parser.add_argument('--eval_iter',
                        type=int,
                        default=5000,
                        help='Sample generator output every x steps')
    parser.add_argument('--save_para_every',
                        type=int,
                        default=10000,
                        help='save model parameters every x steps')
    parser.add_argument('--tt_percentage',
                        type=float,
                        default=0.5,
                        help='fraction of data used for testing; 0.5 means a 50/50 train/test split')
    parser.add_argument(
        '--is_generatesubsession',
        type=bool,
        default=False,
        help=
        'whether to generate subsessions, e.g., 12345 --> 01234, 00123, 00012; may be useful for very long sequences'
    )
    args = parser.parse_args()

    dl = data_loader_recsys.Data_Loader({
        'model_type': 'generator',
        'dir_name': args.datapath
    })
    all_samples = dl.item
    items = dl.item_dict
    print "len(items)", len(items)

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
    all_samples = all_samples[shuffle_indices]

    # Split train/test set
    dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
    train_set, valid_set = all_samples[:dev_sample_index], all_samples[
        dev_sample_index:]

    if args.is_generatesubsession:
        train_set = generatesubsequence(train_set)

    model_para = {
        # if you change the parameters here, do not forget to change them in nextitrec_generate.py as well
        'item_size': len(items),
        'dilated_channels': 100,
        # if you use nextitnet_residual_block, you can use [1, 4],
        # if you use nextitnet_residual_block_one, I suggest trying [1, 2, 4] first
        # when you change it, do not forget to change it in nextitrec_generate.py as well
        # if removing the residual network does not obviously hurt performance, your data probably
        # does not have strong sequential structure; change the dataset and try again
        'dilations': [1, 2],
        'kernel_size': 3,
        'learning_rate': 0.001,
        'batch_size': 32,
        'iterations': 400,
        'is_negsample': False  # False denotes no negative sampling
    }

    itemrec = generator_recsys.NextItNet_Decoder(model_para)
    itemrec.train_graph(model_para['is_negsample'])
    optimizer = tf.train.AdamOptimizer(model_para['learning_rate'],
                                       beta1=args.beta1).minimize(itemrec.loss)
    itemrec.predict_graph(model_para['is_negsample'], reuse=True)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()

    numIters = 1
    for iter in range(model_para['iterations']):
        batch_no = 0
        batch_size = model_para['batch_size']
        while (batch_no + 1) * batch_size < train_set.shape[0]:

            start = time.clock()

            item_batch = train_set[batch_no * batch_size:(batch_no + 1) *
                                   batch_size, :]
            _, loss, results = sess.run(
                [optimizer, itemrec.loss, itemrec.arg_max_prediction],
                feed_dict={itemrec.itemseq_input: item_batch})
            end = time.clock()
            if numIters % args.eval_iter == 0:
                print "-------------------------------------------------------train1"
                print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
                    loss, iter, batch_no, numIters,
                    train_set.shape[0] / batch_size)
                print "TIME FOR BATCH", end - start
                print "TIME FOR ITER (mins)", (end - start) * (
                    train_set.shape[0] / batch_size) / 60.0

            if numIters % args.eval_iter == 0:
                print "-------------------------------------------------------test1"
                if (batch_no + 1) * batch_size < valid_set.shape[0]:
                    item_batch = valid_set[(batch_no) *
                                           batch_size:(batch_no + 1) *
                                           batch_size, :]
                loss = sess.run([itemrec.loss_test],
                                feed_dict={itemrec.input_predict: item_batch})
                print "LOSS: {}\tITER: {}\tBATCH_NO: {}\t STEP:{}\t total_batches:{}".format(
                    loss, iter, batch_no, numIters,
                    valid_set.shape[0] / batch_size)

            batch_no += 1

            if numIters % args.eval_iter == 0:
                batch_no_test = 0
                batch_size_test = batch_size * 1
                curr_preds_5 = []
                rec_preds_5 = []  #1
                ndcg_preds_5 = []  #1
                curr_preds_20 = []
                rec_preds_20 = []  # 1
                ndcg_preds_20 = []  # 1
                while (batch_no_test +
                       1) * batch_size_test < valid_set.shape[0]:
                    if (numIters / (args.eval_iter) < 10):
                        if (batch_no_test > 20):
                            break
                    else:
                        if (batch_no_test > 500):
                            break
                    item_batch = valid_set[batch_no_test *
                                           batch_size_test:(batch_no_test +
                                                            1) *
                                           batch_size_test, :]
                    [probs] = sess.run(
                        [itemrec.g_probs],
                        feed_dict={itemrec.input_predict: item_batch})
                    for bi in range(probs.shape[0]):
                        pred_words_5 = utils.sample_top_k(
                            probs[bi][-1], top_k=args.top_k)  #top_k=5
                        pred_words_20 = utils.sample_top_k(probs[bi][-1],
                                                           top_k=args.top_k +
                                                           15)

                        true_word = item_batch[bi][-1]
                        predictmap_5 = {
                            ch: i
                            for i, ch in enumerate(pred_words_5)
                        }
                        pred_words_20 = {
                            ch: i
                            for i, ch in enumerate(pred_words_20)
                        }

                        rank_5 = predictmap_5.get(true_word)
                        rank_20 = pred_words_20.get(true_word)
                        if rank_5 == None:
                            curr_preds_5.append(0.0)
                            rec_preds_5.append(0.0)  #2
                            ndcg_preds_5.append(0.0)  #2
                        else:
                            MRR_5 = 1.0 / (rank_5 + 1)
                            Rec_5 = 1.0  #3
                            ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)  # 3
                            curr_preds_5.append(MRR_5)
                            rec_preds_5.append(Rec_5)  #4
                            ndcg_preds_5.append(ndcg_5)  # 4
                        if rank_20 == None:
                            curr_preds_20.append(0.0)
                            rec_preds_20.append(0.0)  #2
                            ndcg_preds_20.append(0.0)  #2
                        else:
                            MRR_20 = 1.0 / (rank_20 + 1)
                            Rec_20 = 1.0  #3
                            ndcg_20 = 1.0 / math.log(rank_20 + 2, 2)  # 3
                            curr_preds_20.append(MRR_20)
                            rec_preds_20.append(Rec_20)  #4
                            ndcg_preds_20.append(ndcg_20)  # 4

                    batch_no_test += 1
                    print "BATCH_NO: {}".format(batch_no_test)
                    print "Accuracy mrr_5:", sum(curr_preds_5) / float(
                        len(curr_preds_5))  #5
                    print "Accuracy mrr_20:", sum(curr_preds_20) / float(
                        len(curr_preds_20))  # 5
                    print "Accuracy hit_5:", sum(rec_preds_5) / float(
                        len(rec_preds_5))  #5
                    print "Accuracy hit_20:", sum(rec_preds_20) / float(
                        len(rec_preds_20))  # 5
                    print "Accuracy ndcg_5:", sum(ndcg_preds_5) / float(
                        len(ndcg_preds_5))  # 5
                    print "Accuracy ndcg_20:", sum(ndcg_preds_20) / float(
                        len(ndcg_preds_20))  #
                    #print "curr_preds",curr_preds
                # print "---------------------------Test Accuray----------------------------"
            numIters += 1
            if numIters % args.save_para_every == 0:
                save_path = saver.save(
                    sess, "Data/Models/generation_model/model_nextitnet.ckpt")
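
The model_para comments above repeatedly warn that the training and generation scripts must use identical hyper-parameters. One hedged way to avoid keeping them in sync by hand is to persist the dict next to the checkpoint and reload it in nextitrec_generate.py; the file name model_para.json below is illustrative, not part of the repository:

import json

def save_model_para(model_para, path="Data/Models/generation_model/model_para.json"):
    # write the hyper-parameter dict next to the saved checkpoint
    with open(path, "w") as f:
        json.dump(model_para, f, indent=2)

def load_model_para(path="Data/Models/generation_model/model_para.json"):
    # reload the exact hyper-parameters used at training time
    with open(path) as f:
        return json.load(f)
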
Example #8
        'factor': args.use_embedding_type_factor,
        'seq_len': len(all_samples[0]),
        'pad': dl.padid,
        'parametersharing_type': args.use_parametersharing_type,
    }
    print("in_embed_size", model_para["in_embed_size"])
    print("dilated_channels", model_para["dilated_channels"])
    print("out_embed_size", model_para["out_embed_size"])
    print("dilations", model_para['dilations'])
    print("batch_size", model_para["batch_size"])
    print("block", model_para["block"])
    print("factor", model_para["factor"])
    print("parametersharing_type", model_para["parametersharing_type"])

    # print("seq_len: ", model_para['seq_len'])
    itemrec = generator_recsys.NextItNet_Decoder(model_para)
    itemrec.train_graph()  # model_para['is_negsample']
    optimizer = tf.train.AdamOptimizer(model_para['learning_rate'],
                                       beta1=args.beta1).minimize(itemrec.loss)
    itemrec.predict_graph_onrecall(reuse=True)  # model_para['is_negsample']

    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True

    sess = tf.Session(config=session_config)
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()


    for iter in range(model_para['iterations']):
        batch_no = 0