def load_data(self, args, mode, type):
    # train_mfcc_dir, train_label_dir, test_mfcc_dir, test_label_dir and batch_size
    # are assumed to be defined in the enclosing module (not shown in this excerpt)
    if mode == 'train':
        return load_batched_data(train_mfcc_dir, train_label_dir,
                                 batch_size, mode, type)
    elif mode == 'test':
        return load_batched_data(test_mfcc_dir, test_label_dir, batch_size,
                                 mode, type)
    else:
        raise ValueError('mode should be train or test.')
TARGET_PATH = '../TRAIN/All/phone_y/'  #directory of nPhonemes 1-D array .npy files
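#INPUT_PATH (directory of the matching MFCC .npy files) is assumed to be defined above this excerpt; it is passed to load_batched_data below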

####Learning Parameters
learningRate = 0.001
momentum = 0.9
nEpochs = 120
batchSize = 128

####Network Parameters
nFeatures = 26  #12 MFCC coefficients + energy, and derivatives
nHidden = 128
nClasses = 40  #39 phonemes, plus the "blank" for CTC

####Load data
print('Loading data')
batchedData, maxTimeSteps, totalN = load_batched_data(INPUT_PATH, TARGET_PATH,
                                                      batchSize)

####Define graph
print('Defining graph')
graph = tf.Graph()
with graph.as_default():

    ####NOTE: try variable-steps inputs and dynamic bidirectional rnn, when it's implemented in tensorflow

    ####Graph input
    inputX = tf.placeholder(tf.float32,
                            shape=(maxTimeSteps, batchSize, nFeatures))
    #Prep input data to fit requirements of rnn.bidirectional_rnn
    #  Reshape to 2-D tensor (nTimeSteps*batchSize, nFeatures)
    inputXrs = tf.reshape(inputX, [-1, nFeatures])
    #  Split to get a list of 'maxTimeSteps' tensors of shape (batchSize, nFeatures)
    inputList = tf.split(0, maxTimeSteps, inputXrs)
Example #3
def main(_):
    train_mfcc_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TRAIN',
                                  'mfcc')
    train_label_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TRAIN',
                                   'label')
    test_mfcc_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TEST',
                                 'mfcc')
    test_label_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TEST',
                                  'label')

    savedir = os.path.join(FLAGS.exp_dir, FLAGS.level, 'save')
    resultdir = os.path.join(FLAGS.exp_dir, FLAGS.level, 'result')

    if FLAGS.is_training:
        batched_data, max_time_steps, total_n = load_batched_data(
            train_mfcc_dir, train_label_dir, FLAGS.batch_size, FLAGS.level)
    else:
        batched_data, max_time_steps, total_n = load_batched_data(
            test_mfcc_dir, test_label_dir, FLAGS.batch_size, FLAGS.level)

    hparams = {}
    hparams['level'] = FLAGS.level
    hparams['batch_size'] = FLAGS.batch_size
    hparams['partition_size'] = FLAGS.partition_size
    hparams['num_hidden'] = FLAGS.num_hidden
    hparams['feature_length'] = FLAGS.feature_length
    hparams['num_classes'] = FLAGS.num_classes
    hparams['num_proj'] = FLAGS.num_proj
    hparams['learning_rate'] = FLAGS.learning_rate
    hparams['keep_prob'] = FLAGS.keep_prob
    hparams['clip_gradient_norm'] = FLAGS.clip_gradient_norm
    hparams['use_peepholes'] = FLAGS.use_peepholes
    if FLAGS.activation == 'tanh':
        hparams['activation'] = tf.tanh
    elif FLAGS.activation == 'relu':
        hparams['activation'] = tf.nn.relu
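    # note: hparams['activation'] is left unset for any FLAGS.activation value other than 'tanh' or 'relu'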
    hparams['max_time_steps'] = max_time_steps
    with tf.Graph().as_default():
        model = DRNN(FLAGS.cell, hparams, FLAGS.is_training)
        train_writer = tf.summary.FileWriter(resultdir + '/train')
        test_writer = tf.summary.FileWriter(resultdir + '/test')
        with tf.Session(FLAGS.master) as sess:
            # restore from stored model
            if FLAGS.restore:
                ckpt = tf.train.get_checkpoint_state(savedir)
                if ckpt and ckpt.model_checkpoint_path:
                    model.saver.restore(sess, ckpt.model_checkpoint_path)
                    print('Model restored from:' + ckpt.model_checkpoint_path)
            else:
                print('Initializing')
                sess.run(model.initial_op)
            train_writer.add_graph(sess.graph)
            for epoch in range(FLAGS.num_epochs):
                ## training
                start = time.time()
                if FLAGS.is_training:
                    print('Epoch', epoch + 1, '...')
                batch_errors = np.zeros(len(batched_data))
                batched_random_idx = np.random.permutation(len(batched_data))

                for batch, batch_original_idx in enumerate(batched_random_idx):
                    batch_inputs, batch_target_sparse, batch_seq_length = batched_data[
                        batch_original_idx]
                    batch_tgt_idx, batch_tgt_vals, batch_tgt_shape = batch_target_sparse
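                    # the sparse target unpacks into the (indices, values, dense_shape) triple of a tf.SparseTensor (the usual form for CTC-style labels)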
                    feeddict = {
                        model.x: batch_inputs,
                        model.tgt_idx: batch_tgt_idx,
                        model.tgt_vals: batch_tgt_vals,
                        model.tgt_shape: batch_tgt_shape,
                        model.seq_length: batch_seq_length
                    }

                    if FLAGS.is_training and (
                        (epoch * len(batched_random_idx) + batch + 1) % 20 == 0
                            or (epoch == FLAGS.num_epochs - 1
                                and batch == len(batched_random_idx) - 1)):
                        checkpoint_path = os.path.join(savedir, 'model.ckpt')
                        model.saver.save(sess,
                                         checkpoint_path,
                                         global_step=model.global_step)
                        print('Model has been saved in {}'.format(savedir))

                    if FLAGS.level == 'cha':
                        if FLAGS.is_training:
                            _, l, pre, y, er, global_step = sess.run(
                                [
                                    model.train_op, model.loss,
                                    model.predictions, model.y,
                                    model.error_rate, model.global_step
                                ],
                                feed_dict=feeddict)
                            batch_errors[batch] = er
                            if global_step % 10 == 0:
                                log_scalar(train_writer, 'CER',
                                           er / FLAGS.batch_size, global_step)
                                print(
                                    '{} mode, global_step:{}, lr:{}, total:{}, '
                                    'batch:{}/{},epoch:{}/{},train loss={:.3f},mean train '
                                    'CER={:.3f}'.format(
                                        FLAGS.level, global_step,
                                        FLAGS.learning_rate,
                                        total_n, batch + 1,
                                        len(batched_random_idx), epoch + 1,
                                        FLAGS.num_epochs, l,
                                        er / FLAGS.batch_size))

                        elif not FLAGS.is_training:
                            l, pre, y, er, global_step = sess.run(
                                [
                                    model.loss, model.predictions, model.y,
                                    model.error_rate, model.global_step
                                ],
                                feed_dict=feeddict)
                            batch_errors[batch] = er
                            log_scalar(test_writer, 'CER',
                                       er / FLAGS.batch_size, global_step)
                            print(
                                '{} mode, global_step:{}, total:{}, batch:{}/{},test '
                                'loss={:.3f},mean test CER={:.3f}'.format(
                                    FLAGS.level, global_step, total_n,
                                    batch + 1, len(batched_random_idx), l,
                                    er / FLAGS.batch_size))

                    elif FLAGS.level == 'phn':
                        if FLAGS.is_training:
                            _, l, pre, y, global_step = sess.run(
                                [
                                    model.train_op, model.loss,
                                    model.predictions, model.y,
                                    model.global_step
                                ],
                                feed_dict=feeddict)
                            er = get_edit_distance([pre.values], [y.values],
                                                   True, FLAGS.level)
                            if global_step % 10 == 0:
                                log_scalar(train_writer, 'PER', er,
                                           global_step)
                                print(
                                    '{} mode, global_step:{}, lr:{}, total:{}, '
                                    'batch:{}/{},epoch:{}/{},train loss={:.3f},mean train '
                                    'PER={:.3f}'.format(
                                        FLAGS.level, global_step,
                                        FLAGS.learning_rate,
                                        total_n, batch + 1,
                                        len(batched_random_idx), epoch + 1,
                                        FLAGS.num_epochs, l, er))
                            batch_errors[batch] = er * len(batch_seq_length)
                        elif not FLAGS.is_training:
                            l, pre, y, global_step = sess.run(
                                [
                                    model.loss, model.predictions, model.y,
                                    model.global_step
                                ],
                                feed_dict=feeddict)
                            er = get_edit_distance([pre.values], [y.values],
                                                   True, FLAGS.level)
                            log_scalar(test_writer, 'PER', er, global_step)
                            print(
                                '{} mode, global_step:{}, total:{}, batch:{}/{},test '
                                'loss={:.3f},mean test PER={:.3f}'.format(
                                    FLAGS.level, global_step, total_n,
                                    batch + 1, len(batched_random_idx), l, er))
                            batch_errors[batch] = er * len(batch_seq_length)

                    # NOTE: bail out of this epoch early if the batch error rate saturates at 1.0
                    if er / FLAGS.batch_size == 1.0:
                        break

                    if batch % 100 == 0:
                        print('Truth:\n' +
                              output_to_sequence(y, level=FLAGS.level))
                        print('Output:\n' +
                              output_to_sequence(pre, level=FLAGS.level))

                end = time.time()
                delta_time = end - start
                print('Epoch ' + str(epoch + 1) + ' needs time:' +
                      str(delta_time) + ' s')

                if FLAGS.is_training:
                    if (epoch + 1) % 1 == 0:
                        checkpoint_path = os.path.join(savedir, 'model.ckpt')
                        model.saver.save(sess,
                                         checkpoint_path,
                                         global_step=model.global_step)
                        print('Model has been saved in {}'.format(savedir))
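                    # batch_errors holds per-batch error sums, so dividing by total_n gives the mean error rate over all samples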
                    epoch_er = batch_errors.sum() / total_n
                    print('Epoch', epoch + 1, 'mean train error rate:',
                          epoch_er)

                if not FLAGS.is_training:
                    with tf.gfile.GFile(
                            os.path.join(resultdir,
                                         FLAGS.level + '_result.txt'),
                            'a') as result:
                        result.write(
                            output_to_sequence(y, level=FLAGS.level) + '\n')
                        result.write(
                            output_to_sequence(pre, level=FLAGS.level) + '\n')
                        result.write('\n')
                    epoch_er = batch_errors.sum() / total_n
                    print(' test error rate:', epoch_er)
TARGET_PATH = './sample_data/char_y/' #directory of nCharacters 1-D array .npy files

####Learning Parameters
learningRate = 0.001
momentum = 0.9
nEpochs = 300
batchSize = 4

####Network Parameters
nFeatures = 26 #12 MFCC coefficients + energy, and derivatives
nHidden = 128
nClasses = 28  #27 characters, plus the "blank" for CTC

####Load data
print('Loading data')
batchedData, maxTimeSteps, totalN = load_batched_data(INPUT_PATH, TARGET_PATH, batchSize)

####Define graph
print('Defining graph')
graph = tf.Graph()
with graph.as_default():

    ####NOTE: try variable-steps inputs and dynamic bidirectional rnn, when it's implemented in tensorflow
        
    ####Graph input
    inputX = tf.placeholder(tf.float32, shape=(maxTimeSteps, batchSize, nFeatures))
    #Prep input data to fit requirements of rnn.bidirectional_rnn
    #  Reshape to 2-D tensor (nTimeSteps*batchSize, nFeatures)
    inputXrs = tf.reshape(inputX, [-1, nFeatures])
    #  Split to get a list of 'maxTimeSteps' tensors of shape (batchSize, nFeatures)
    inputList = tf.split(0, maxTimeSteps, inputXrs)
def load_data(self, feature_dir, label_dir, mode, level):
    return load_batched_data(feature_dir, label_dir, batch_size, mode, level)
Example #6
    def train(self):

        print("[INFO]: Data Loaded")
        model = SimpleModel(cfg)
        print("[INFO]: Building Graph ...")
        model.build_graph()

        num_batches = int(len(self.data[0]) / cfg.batch_size)
        print("[INFO]: Number of batches are %d " % num_batches)

        with tf.Session(graph=model.graph) as sess:
            writer = tf.summary.FileWriter("logging", graph=model.graph)

            if (cfg.restore == 1):
                model.saver.restore(
                    sess, tf.train.latest_checkpoint(cfg.checkpoint_dir))
            else:
                sess.run(model.initial_op)
            total_loss = 0.
            avg_loss = 0.
            for iteration in range(cfg.num_iters):
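                # load_batched_data is used as a generator here: each next() call yields one batch tuple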
                X_tuple = next(
                    load_batched_data(self.data, cfg.batch_size, cfg))
                _, loss, summary = sess.run(
                    [model.opt, model.loss, model.summary_op],
                    feed_dict={
                        model.name: X_tuple[0],
                        model.item_condition_id: X_tuple[1],
                        model.category_id: X_tuple[2],
                        model.brand_id: X_tuple[3],
                        model.shipping: X_tuple[4],
                        model.item_description: X_tuple[5],
                        model.target_price: X_tuple[-1]
                    })

                writer.add_summary(summary, iteration)
                total_loss = total_loss + loss
                avg_loss = total_loss / (iteration + 1)
                if iteration != 0 and iteration % 50 == 0:
                    print("[Info]: Iter:%d/%d , avg_loss: %f " %
                          (iteration + 1, cfg.num_iters, avg_loss))

                if (iteration % 100 == 0):
                    model.saver.save(sess,
                                     cfg.checkpoint_dir + cfg.dataset +
                                     "_SimpleModel.ckpt",
                                     global_step=model.global_step)

                # Test after every 500 iterations
                if (iteration % 500 == 0):
                    total_val_loss = 0.
                    # use a separate loop variable so the outer training `iteration` is not shadowed
                    for val_iter in range(
                            len(self.val_data[0]) // cfg.batch_size):
                        test_tuple = next(
                            load_batched_data(self.val_data, cfg.batch_size,
                                              cfg))
                        _, val_loss = sess.run(
                            [model.pred_price, model.loss],
                            feed_dict={
                                model.name: test_tuple[0],
                                model.item_condition_id: test_tuple[1],
                                model.category_id: test_tuple[2],
                                model.brand_id: test_tuple[3],
                                model.shipping: test_tuple[4],
                                model.item_description: test_tuple[5],
                                model.target_price: test_tuple[-1]
                            })
                        total_val_loss = total_val_loss + val_loss
                    avg_val_loss = total_val_loss / (val_iter + 1)
                    print("Test RMSE: %f" % avg_val_loss)
Example #7
def main(_):
    print('%s mode...' % str(FLAGS.mode))
    savedir = os.path.join(FLAGS.exp_dir, FLAGS.level, 'save')
    resultdir = os.path.join(FLAGS.exp_dir, FLAGS.level, 'result')
    check_path_exists([savedir, resultdir])
    # load data
    hparams = {}
    hparams['level'] = FLAGS.level
    hparams['batch_size'] = FLAGS.batch_size
    hparams['partition_size'] = FLAGS.partition_size
    hparams['num_hidden'] = FLAGS.num_hidden
    hparams['feature_length'] = FLAGS.feature_length
    hparams['num_classes'] = FLAGS.num_classes
    hparams['num_proj'] = FLAGS.num_proj
    hparams['learning_rate'] = FLAGS.learning_rate
    hparams['keep_prob'] = FLAGS.keep_prob
    hparams['clip_gradient_norm'] = FLAGS.clip_gradient_norm
    hparams['use_peepholes'] = FLAGS.use_peepholes
    if FLAGS.activation == 'tanh':
        hparams['activation'] = tf.tanh
    elif FLAGS.activation == 'relu':
        hparams['activation'] = tf.nn.relu
    feature_dirs, label_dirs = get_data(FLAGS.input_data_dir, FLAGS.level,
                                        FLAGS.train_dataset, FLAGS.dev_dataset,
                                        FLAGS.test_dataset, FLAGS.mode)
    batched_data, max_time_steps, total_n = load_batched_data(
        feature_dirs[0], label_dirs[0], FLAGS.batch_size, FLAGS.level)
    hparams['max_time_steps'] = max_time_steps
    ## shuffle feature_dirs and label_dirs in the same order
    FL_pair = list(zip(feature_dirs, label_dirs))
    random.shuffle(FL_pair)
    feature_dirs, label_dirs = zip(*FL_pair)
    train_writer = tf.summary.FileWriter(resultdir + '/train')
    test_writer = tf.summary.FileWriter(resultdir + '/test')

    for feature_dir, label_dir in zip(feature_dirs, label_dirs):
        id_dir = feature_dirs.index(feature_dir)
        print('dir id:{}'.format(id_dir))
        batched_data, max_time_steps, total_n = load_batched_data(
            feature_dir, label_dir, FLAGS.batch_size, FLAGS.level)
        hparams['max_time_steps'] = max_time_steps
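        # a new DRNN model is constructed for each sub-directory, since max_time_steps (set just above) can differ per directory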
        model = DRNN(FLAGS.cell, hparams, FLAGS.mode == 'train')

        with tf.Session(FLAGS.master) as sess:
            # restore from stored model
            if FLAGS.restore:
                ckpt = tf.train.get_checkpoint_state(savedir)
                if ckpt and ckpt.model_checkpoint_path:
                    model.saver.restore(sess, ckpt.model_checkpoint_path)
                    print('Model restored from:' + savedir)
            else:
                print('Initializing')
                sess.run(model.initial_op)

            for epoch in range(FLAGS.num_epochs):
                ## training
                start = time.time()
                if FLAGS.mode == 'train':
                    print('Epoch {} ...'.format(epoch + 1))

                batch_errors = np.zeros(len(batched_data))
                batched_random_idx = np.random.permutation(len(batched_data))

                for batch, batch_original_idx in enumerate(batched_random_idx):
                    batch_inputs, batch_target_sparse, batch_seq_length = batched_data[
                        batch_original_idx]
                    batch_tgt_idx, batch_tgt_vals, batch_tgt_shape = batch_target_sparse
                    feedDict = {
                        model.x: batch_inputs,
                        model.tgt_idx: batch_tgt_idx,
                        model.tgt_vals: batch_tgt_vals,
                        model.tgt_shape: batch_tgt_shape,
                        model.seq_length: batch_seq_length
                    }

                    if FLAGS.level == 'cha':
                        if FLAGS.mode == 'train':
                            _, l, pre, y, er = sess.run([
                                model.train_op, model.loss, model.predictions,
                                model.y, model.error_rate
                            ],
                                                        feed_dict=feedDict)

                            batch_errors[batch] = er
                            print(
                                '\n{} mode, total:{},subdir:{}/{},batch:{}/{},epoch:{}/{},train loss={:.3f},mean train CER={:.3f}\n'
                                .format(FLAGS.level, total_n, id_dir + 1,
                                        len(feature_dirs), batch + 1,
                                        len(batched_random_idx), epoch + 1,
                                        FLAGS.num_epochs, l,
                                        er / FLAGS.batch_size))

                        elif FLAGS.mode == 'dev':
                            l, pre, y, er = sess.run([
                                model.loss, model.predictions, model.y,
                                model.error_rate
                            ],
                                                     feed_dict=feedDict)
                            batch_errors[batch] = er
                            print(
                                '\n{} mode, total:{},subdir:{}/{},batch:{}/{},dev loss={:.3f},mean dev CER={:.3f}\n'
                                .format(FLAGS.level, total_n, id_dir + 1,
                                        len(feature_dirs), batch + 1,
                                        len(batched_random_idx), l,
                                        er / FLAGS.batch_size))

                        elif FLAGS.mode == 'test':
                            l, pre, y, er = sess.run([
                                model.loss, model.predictions, model.y,
                                model.error_rate
                            ],
                                                     feed_dict=feedDict)
                            batch_errors[batch] = er
                            print(
                                '\n{} mode, total:{},subdir:{}/{},batch:{}/{},test loss={:.3f},mean test CER={:.3f}\n'
                                .format(FLAGS.level, total_n, id_dir + 1,
                                        len(feature_dirs), batch + 1,
                                        len(batched_random_idx), l,
                                        er / FLAGS.batch_size))
                    elif FLAGS.level == 'seq2seq':
                        raise ValueError('level %s is not supported now' %
                                         str(FLAGS.level))

                    # NOTE: bail out of this epoch early if the batch error rate saturates at 1.0
                    if er / FLAGS.batch_size == 1.0:
                        break

                    if batch % 20 == 0:
                        print('Truth:\n' +
                              output_to_sequence(y, level=FLAGS.level))
                        print('Output:\n' +
                              output_to_sequence(pre, level=FLAGS.level))

                    if FLAGS.mode == 'train' and (
                        (epoch * len(batched_random_idx) + batch + 1) % 20 == 0
                            or (epoch == FLAGS.num_epochs - 1
                                and batch == len(batched_random_idx) - 1)):
                        checkpoint_path = os.path.join(savedir, 'model.ckpt')
                        model.saver.save(sess,
                                         checkpoint_path,
                                         global_step=epoch)
                        print('Model has been saved in {}'.format(savedir))

                end = time.time()
                delta_time = end - start
                print('Epoch ' + str(epoch + 1) + ' needs time:' +
                      str(delta_time) + ' s')

                if FLAGS.mode == 'train':
                    if (epoch + 1) % 1 == 0:
                        checkpoint_path = os.path.join(savedir, 'model.ckpt')
                        model.saver.save(sess,
                                         checkpoint_path,
                                         global_step=epoch)
                        print('Model has been saved in {}'.format(savedir))
                    epoch_er = batch_errors.sum() / total_n
                    print('Epoch', epoch + 1, 'mean train error rate:',
                          epoch_er)

                if FLAGS.mode == 'test' or FLAGS.mode == 'dev':
                    with open(
                            os.path.join(resultdir,
                                         FLAGS.level + '_result.txt'),
                            'a') as result:
                        result.write(
                            output_to_sequence(y, level=FLAGS.level) + '\n')
                        result.write(
                            output_to_sequence(pre, level=FLAGS.level) + '\n')
                        result.write('\n')
                    epoch_er = batch_errors.sum() / total_n
                    print(' test error rate:', epoch_er)
Example #8
def load_data(self, batch_size, b, e, d, target_gaps):
    return load_batched_data(batch_size, b, e, d, target_gaps)
Example #9
    def run(self, user):
        args_dict = self._default_configs()
        args = dotdict(args_dict)

        #get data
        #print("Loading data")
        train_dataset = "../data/dataset_" + str(user) + "_train.pickle"
        test_dataset = "../data/dataset_" + str(user) + "_test.pickle"
        b, e, d, g, target_gaps = get_data(train_dataset)
        b_t, e_t, d_t, g_t, target_gaps_t = get_data(test_dataset)
        #print(len(b))
        #print(type(g))
        #g = np.vstack(g)
        #print(b.shape)
        totalN = len(b)
        #print("b :",len(b))
        num_batches = len(b) // batch_size
        maxLength = 0
        for x in b:
            maxLength = max(maxLength, x.shape[0])
        maxSessLen = maxLength

        print("Building Model")
        recom = Model(args, maxSessLen)
        recom.build_graph(args, maxSessLen)
        print("Starting Session")
        #print(recom.config)
        test_Err = []
        test_MAE = []
        with tf.Session(graph=recom.graph) as sess:
            if (mode == 'train'):
                writer = tf.summary.FileWriter("loggingdir", graph=recom.graph)
                sess.run(recom.initial_op)
                for epoch in range(num_epochs):
                    # training
                    start = time.time()
                    print('Epoch {} ...'.format(epoch + 1))
                    batchLoss = np.zeros(num_batches)
                    batchErr = np.zeros(num_batches)
                    batchErrMAE = np.zeros(num_batches)
                    batchRandIxs = np.random.permutation(num_batches)
                    for batch, batchOrigI in enumerate(batchRandIxs):
                        batchInputs_b, batchInputs_e, batchInputs_d, batchInputs_g, batchTargetList, batchSeqLengths = next(
                            load_batched_data(batch_size, b, e, d, g,
                                              target_gaps))
                        #print(type(batchInputs_g))
                        feedDict = {
                            recom.inputb: batchInputs_b,
                            recom.inpute: batchInputs_e,
                            recom.inputg: batchInputs_g,
                            recom.inputd: batchInputs_d,
                            recom.target_gaps: batchTargetList,
                            recom.sessLengths: batchSeqLengths
                        }

                        _, l, lamb = sess.run(
                            [recom.optimizer, recom.loss, recom.lamb],
                            feed_dict=feedDict)
                        #writer.add_summary(summary,epoch*num_batches+batch)

                        batchLoss[batch] = l

                        predicted_gaps = self.predict_gaps(
                            lamb, batchSeqLengths, maxSessLen)
                        error = self.MSE(predicted_gaps, batchTargetList)
                        error_MAE = self.MAE(predicted_gaps, batchTargetList)
                        batchErr[batch] = error
                        batchErrMAE[batch] = error_MAE
                        #print(len(batchTargetList))
                        print(
                            'batch:{}/{},epoch:{}/{},train loss={:.3f},RMSE={:.3f},MAE={:.3f}'
                            .format(batch + 1, len(batchRandIxs), epoch + 1,
                                    num_epochs, l, error, error_MAE))

                        #print(predicted_gaps)
                        #print(batchTargetList)

                    print("MAE after %d epoch is %.2f" %
                          (epoch + 1, np.mean(batchErrMAE)))

                    end = time.time()
                    delta_time = end - start
                    print('Average loss of the epoch is %.2f' %
                          np.mean(batchLoss))
                    print('Epoch ' + str(epoch + 1) + ' needs time:' +
                          str(delta_time) + ' s')

                    #Testing after every epoch
                    num_b = len(b_t) // batch_size
                    batchErr = np.zeros(num_b)
                    batchErrMAE = np.zeros(num_b)
                    batchRandIxs = np.random.permutation(num_b)
                    for batch, batchOrigI in enumerate(batchRandIxs):
                        batchInputs_b, batchInputs_e, batchInputs_d, batchInputs_g, batchTargetList, batchSeqLengths = next(
                            load_batched_data_test(maxSessLen, batch_size, b_t,
                                                   e_t, d_t, g_t,
                                                   target_gaps_t))
                        feedDict = {
                            recom.inputb: batchInputs_b,
                            recom.inpute: batchInputs_e,
                            recom.inputg: batchInputs_g,
                            recom.inputd: batchInputs_d,
                            recom.target_gaps: batchTargetList,
                            recom.sessLengths: batchSeqLengths
                        }
                        __, l, lamb = sess.run(
                            [recom.optimizer, recom.loss, recom.lamb],
                            feed_dict=feedDict)

                        predicted_gaps = self.predict_gaps(
                            lamb, batchSeqLengths, maxSessLen)
                        error = self.MSE(predicted_gaps, batchTargetList)
                        error_MAE = self.MAE(predicted_gaps, batchTargetList)
                        batchErr[batch] = error
                        batchErrMAE[batch] = error_MAE

                    test_Err.append(np.mean(batchErr))
                    test_MAE.append(np.mean(batchErrMAE))
                    #print("RMSE error of test set is %.2f"% np.mean(batchErr))
                    print("MAE error of test set is %.2f\n" %
                          np.mean(batchErrMAE))
        return test_Err, test_MAE