def step1(args): # training variational autoencoder
    train_id_path, dev_id_path, vocab_path = data_utils.prepare_diologue(args.works_dir, args.vocab_size)

    checkpoint_step = 200
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        print ()
        print ('-------loading model-------')
        model = v_autoencoder(sess, args, feed_previous=False)
        ckpt = tf.train.get_checkpoint_state(args.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            sess.run(tf.global_variables_initializer())
            model.saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        else:
            print("Created model with fresh parameters.")
            sess.run(tf.global_variables_initializer())

        print ('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)

        print ('loading dev set from %s...' %dev_id_path)
        dev_set = data_utils.read_data(dev_id_path)
        print ('loading train set from %s...' %train_id_path)
        train_set = data_utils.read_data(train_id_path)
        print ('-------start training-------')
        seq_loss, kl_loss = 0.0, 0.0
        current_step = 0
        previous_losses = []

        while True:
            encoder_inputs, decoder_inputs, weights = model.train_get_batch(train_set)
            step_seq_loss, step_kl_loss = model.ae_step(sess, encoder_inputs, decoder_inputs, weights, False)
            sess.run(model.kl_weight_op)
            sess.run(model.sample_rate_op)
            seq_loss += step_seq_loss / checkpoint_step
            kl_loss  += step_kl_loss / checkpoint_step
            current_step += 1
            if current_step % checkpoint_step == 0:
                print ("global step %d seq_loss %.4f kl_loss %.4f @ %s" %(model.global_step.eval(), math.exp(seq_loss), kl_loss, datetime.now()))

                checkpoint_path = os.path.join(args.model_dir, "model.ckpt")
                model.saver.save(sess, checkpoint_path )
                seq_loss, kl_loss = 0.0, 0.0
                encoder_inputs, decoder_inputs, weights = model.train_get_batch(dev_set)
                step_seq_loss, step_kl_loss = model.ae_step(sess, encoder_inputs, decoder_inputs, weights, True)
                print ("  eval: seq_loss %.2f" %(math.exp(step_seq_loss)))
                sys.stdout.flush()
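
The VAE training loop above runs `model.kl_weight_op` and `model.sample_rate_op` once per iteration; such ops usually anneal the weight on the KL term from 0 toward 1 so the decoder learns to reconstruct before the latent code is fully regularized. A minimal, framework-free sketch of a linear warm-up schedule (the 10000-step warm-up below is an illustrative assumption, not taken from the model code):

def kl_weight(step, warmup_steps=10000):
    # Linearly raise the KL weight from 0 to 1 over warmup_steps, then hold it at 1.
    return min(1.0, step / float(warmup_steps))

for step in (0, 2500, 5000, 10000, 20000):
    print("step %d -> kl weight %.2f" % (step, kl_weight(step)))
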
Example #2
    def test(self):
        print ('---------prepare data---------')
        data_id_path, vocab_path = data_utils.prepare_test_data(self.works_dir, self.data_path, self.vocab_size)
        print ('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)
        self.sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(self.works_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        else:
            raise ValueError("cannot find a model checkpoint in %s" % self.works_dir)

        print ('loading data set from %s...' %data_id_path)
        data_set = data_utils.read_data(data_id_path, 1.0)
        fout = open(os.path.join(self.works_dir, 'seq2seq_pos.txt'), 'w')
        for batch_encoder_inputs, _, i in self.get_batch_epoch(data_set, data_set):
            _, _, fake_pos_id, _ = self.get_sentence(self.sess, batch_encoder_inputs)
            data_size = self.batch_size
            if i + self.batch_size > len(data_set):
                data_size = len(data_set) - i
            print ('\r', data_size, i, end='')

            for t in range(data_size):
                fout.write(self.print_sentence(fake_pos_id, rev_vocab, t)+'\n')
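
The `data_size` adjustment above trims the final batch when the number of sentences is not a multiple of `batch_size`. A small self-contained illustration of the same remainder handling (the data and batch size are made up for the example):

data = list(range(10))
batch_size = 4
for i in range(0, len(data), batch_size):
    data_size = batch_size
    if i + batch_size > len(data):
        data_size = len(data) - i  # the last batch only holds the remainder
    print("offset %d -> %d items: %s" % (i, data_size, data[i:i + data_size]))
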
Example #3
def train(args):
    print("[%s] Preparing dialog data in %s" %
          (args.model_name, args.data_dir))
    setup_workpath(workspace=args.workspace)
    train_data, dev_data, _ = data_utils.prepare_dialog_data(
        args.data_dir, args.vocab_size)

    # Load the raw chat sentences from the training data (used as gold data for RL steps).
    def get_gold(workspace=args.workspace):
        data_dir = "%s/data" % (workspace)
        full_path = str(sys.path[-1]) + "/" + data_dir + "/train/chat.txt.gz"
        print(full_path)
        with gzip.open(full_path, 'rb') as zi:
            # The with-block closes the file; no explicit close() is needed.
            test_sentences = zi.read().decode().split("\n")
        return test_sentences

    data_ = get_gold()

    if args.reinforce_learn:
        args.batch_size = 1  # We decode one sentence at a time.

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_usage)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Create model.
        print("Creating %d layers of %d units." % (args.num_layers, args.size))
        model = seq2seq_model_utils.create_model(sess,
                                                 args,
                                                 forward_only=False)

        # Read data into buckets and compute their sizes.
        print("Reading development and training data (limit: %d)." %
              args.max_train_data_size)
        dev_set = data_utils.read_data(dev_data,
                                       args.buckets,
                                       reversed=args.rev_model)
        train_set = data_utils.read_data(train_data,
                                         args.buckets,
                                         args.max_train_data_size,
                                         reversed=args.rev_model)
        train_bucket_sizes = [
            len(train_set[b]) for b in xrange(len(args.buckets))
        ]
        train_total_size = float(sum(train_bucket_sizes))

        # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
        # to select a bucket. The length of [scale[i], scale[i+1]] is proportional to
        # the size of the i-th training bucket, as used later.
        train_buckets_scale = [
            sum(train_bucket_sizes[:i + 1]) / train_total_size
            for i in xrange(len(train_bucket_sizes))
        ]

        # This is the training loop.
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []

        # Load vocabularies.
        vocab_path = os.path.join(args.data_dir,
                                  "vocab%d.in" % args.vocab_size)
        vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

        while True:
            # Choose a bucket according to data distribution. We pick a random number
            # in [0, 1] and use the corresponding interval in train_buckets_scale.
            random_number_01 = np.random.random_sample()
            bucket_id = min([
                i for i in xrange(len(train_buckets_scale))
                if train_buckets_scale[i] > random_number_01
            ])

            # Get a batch and make a step.
            start_time = time.time()
            encoder_inputs, decoder_inputs, target_weights, encoder_input, decoder_input = model.get_batch(
                train_set, bucket_id)

            print("[shape]", np.shape(encoder_inputs),
                  np.shape(decoder_inputs), np.shape(target_weights))

            if args.reinforce_learn:
                _, step_loss, _ = model.step_rf(args,
                                                sess,
                                                encoder_inputs,
                                                decoder_inputs,
                                                target_weights,
                                                bucket_id,
                                                data_,
                                                encoder_input,
                                                decoder_input,
                                                rev_vocab=rev_vocab,
                                                forward_only=False)
            else:
                _, step_loss, _ = model.step(sess,
                                             encoder_inputs,
                                             decoder_inputs,
                                             target_weights,
                                             bucket_id,
                                             forward_only=False,
                                             force_dec_input=True)

            step_time += (time.time() - start_time) / args.steps_per_checkpoint
            loss += step_loss / args.steps_per_checkpoint
            current_step += 1
            print("Current step: " + str(current_step))
            # Once in a while, we save checkpoint, print statistics, and run evals.
            if current_step % args.steps_per_checkpoint == 0:  #and (not args.reinforce_learn):
                # Print statistics for the previous epoch.
                perplexity = math.exp(loss) if loss < 300 else float('inf')
                print(
                    "global step %d learning rate %.4f step-time %.2f perplexity %.2f @ %s"
                    % (model.global_step.eval(), model.learning_rate.eval(),
                       step_time, perplexity, datetime.now()))

                # Decrease learning rate if no improvement was seen over last 3 times.
                if len(previous_losses) > 2 and loss > max(
                        previous_losses[-3:]):
                    sess.run(model.learning_rate_decay_op)

                previous_losses.append(loss)

                # Save checkpoint and zero timer and loss.
                checkpoint_path = os.path.join(args.model_dir, "model.ckpt")
                model.saver.save(sess,
                                 checkpoint_path,
                                 global_step=model.global_step)
                step_time, loss = 0.0, 0.0

                # Run evals on development set and print their perplexity.
                for bucket_id in xrange(len(args.buckets)):
                    encoder_inputs, decoder_inputs, target_weights, _, _ = model.get_batch(
                        dev_set, bucket_id)
                    _, eval_loss, _ = model.step(sess,
                                                 encoder_inputs,
                                                 decoder_inputs,
                                                 target_weights,
                                                 bucket_id,
                                                 forward_only=True,
                                                 force_dec_input=False)

                    eval_ppx = math.exp(
                        eval_loss) if eval_loss < 300 else float('inf')
                    print("  eval: bucket %d perplexity %.2f" %
                          (bucket_id, eval_ppx))

                sys.stdout.flush()
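
The bucket selection in this loop draws a uniform random number and takes the first bucket whose cumulative share of the training data exceeds it, so larger buckets are sampled proportionally more often. A standalone sketch of that computation with made-up bucket sizes:

import random

bucket_sizes = [4000, 3000, 2000, 1000]  # hypothetical number of pairs per bucket
total = float(sum(bucket_sizes))
buckets_scale = [sum(bucket_sizes[:i + 1]) / total for i in range(len(bucket_sizes))]
# buckets_scale == [0.4, 0.7, 0.9, 1.0]

r = random.random()
bucket_id = min(i for i in range(len(buckets_scale)) if buckets_scale[i] > r)
print("r=%.2f -> bucket %d" % (r, bucket_id))
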
Example #4
def train():
    print("Preparing dialog data in %s" % FLAGS.data_dir)
    train_data, dev_data, _ = data_utils.prepare_dialog_data(
        FLAGS.data_dir, FLAGS.vocab_size)
    with tf.Session() as sess:

        # Create model.
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.size))
        model = create_model(sess, forward_only=False)

        print("Reading development and training data (limit: %d)." %
              FLAGS.max_train_data_size)
        dev_set = read_data(dev_data)
        train_set = read_data(train_data, FLAGS.max_train_data_size)

        train_bucket_sizes = [len(train_set[b]) for b in xrange(len(BUCKETS))]
        train_total_size = float(sum(train_bucket_sizes))

        train_buckets_scale = [
            sum(train_bucket_sizes[:i + 1]) / train_total_size
            for i in xrange(len(train_bucket_sizes))
        ]

        # This is the training loop.
        print("Start training ...")
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []

        while True:
            random_number_01 = np.random.random_sample()
            bucket_id = min([
                i for i in xrange(len(train_buckets_scale))
                if train_buckets_scale[i] > random_number_01
            ])

            # Get a batch and make a step.
            start_time = time.time()
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id)

            _, step_loss, _ = model.step(sess,
                                         encoder_inputs,
                                         decoder_inputs,
                                         target_weights,
                                         bucket_id,
                                         forward_only=False)

            step_time += (time.time() -
                          start_time) / FLAGS.steps_per_checkpoint
            loss += step_loss / FLAGS.steps_per_checkpoint
            current_step += 1

            if current_step % FLAGS.steps_per_checkpoint == 0:
                perplexity = math.exp(loss) if loss < 300 else float('inf')
                print(
                    "global step %d learning rate %.4f step-time %.2f perplexity %.2f"
                    % (model.global_step.eval(), model.learning_rate.eval(),
                       step_time, perplexity))
                if len(previous_losses) > 2 and loss > max(
                        previous_losses[-3:]):
                    sess.run(model.learning_rate_decay_op)

                previous_losses.append(loss)

                checkpoint_path = os.path.join(FLAGS.model_dir, "model.ckpt")
                model.saver.save(sess,
                                 checkpoint_path,
                                 global_step=model.global_step)
                step_time, loss = 0.0, 0.0

                for bucket_id in xrange(len(BUCKETS)):
                    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        dev_set, bucket_id)
                    _, eval_loss, _ = model.step(sess, encoder_inputs,
                                                 decoder_inputs,
                                                 target_weights, bucket_id,
                                                 True)

                    eval_ppx = math.exp(
                        eval_loss) if eval_loss < 300 else float('inf')
                    print("  eval: bucket %d perplexity %.2f" %
                          (bucket_id, eval_ppx))

                sys.stdout.flush()
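
The `math.exp(loss) if loss < 300 else float('inf')` guard turns the averaged per-token cross-entropy into a perplexity while avoiding an OverflowError when the loss is still huge. A minimal illustration:

import math

def perplexity(avg_xent):
    # exp of the mean cross-entropy; clamp absurdly large losses to infinity instead of overflowing.
    return math.exp(avg_xent) if avg_xent < 300 else float('inf')

for loss in (0.5, 4.6, 350.0):
    print("loss %.1f -> perplexity %s" % (loss, perplexity(loss)))
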
Example #5
def train():
    print("Preparing dialog data in %s" % FLAGS.data_dir)
    train_data, dev_data, _ = data_utils.prepare_dialog_data(
        FLAGS.data_dir, FLAGS.vocab_size)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:

        record_file_name = "seq2seq_loss"
        record_file = open(pjoin(FLAGS.results_dir, record_file_name),
                           mode="ab",
                           buffering=0)

        # Create model.
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.hidden_size))
        model = create_model(sess, forward_only=False)

        # Read data into buckets and compute their sizes.
        print("Reading development and training data (limit: %d)." %
              FLAGS.max_train_data_size)
        dev_set = read_data(dev_data)
        train_set = read_data(train_data, FLAGS.max_train_data_size)
        train_bucket_sizes = [len(train_set[b]) for b in xrange(len(BUCKETS))]
        train_total_size = float(sum(train_bucket_sizes))

        # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
        # to select a bucket. The length of [scale[i], scale[i+1]] is proportional to
        # the size of the i-th training bucket, as used later.
        train_buckets_scale = [
            sum(train_bucket_sizes[:i + 1]) / train_total_size
            for i in xrange(len(train_bucket_sizes))
        ]

        # This is the training loop.
        step_time, total_time, loss = 0.0, 0.0, 0.0
        previous_losses = []

        print("start train...")
        while True:
            # Choose a bucket according to data distribution. We pick a random number
            # in [0, 1] and use the corresponding interval in train_buckets_scale.
            random_number_01 = np.random.random_sample()
            bucket_id = min([
                i for i in xrange(len(train_buckets_scale))
                if train_buckets_scale[i] > random_number_01
            ])

            # Get a batch and make a step.
            start_time = time.time()
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id, "train")

            _, step_loss, _ = model.step(sess,
                                         encoder_inputs,
                                         decoder_inputs,
                                         target_weights,
                                         bucket_id,
                                         forward_only=False)

            step_time += (time.time() -
                          start_time) / FLAGS.steps_per_checkpoint
            loss += step_loss / FLAGS.steps_per_checkpoint

            # Once in a while, we save checkpoint, print statistics, and run evals.
            if model.global_step.eval() % FLAGS.steps_per_checkpoint == 0:
                print("\nTraining...")
                if loss > 6:
                    print("inf !!!")
                    sys.exit(0)
                total_time = step_time * FLAGS.steps_per_checkpoint
                print(
                    "global step %d learning rate %.4f step-time %.2f total_time %.4f, loss %0.7f"
                    % (model.global_step.eval(), model.learning_rate.eval(),
                       step_time, total_time, loss))

                # Decrease learning rate if no improvement was seen over last 3 times.
                if len(previous_losses) > 2 and loss > max(
                        previous_losses[-3:]):
                    sess.run(model.learning_rate_decay_op)

                previous_losses.append(loss)

                # Run evals on development set and print their perplexity.
                for bucket_id in xrange(len(BUCKETS)):
                    print("Testing...")

                    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        dev_set, bucket_id, "train")
                    _, eval_loss, _ = model.step(sess, encoder_inputs,
                                                 decoder_inputs,
                                                 target_weights, bucket_id,
                                                 True)

                    if eval_loss > 6:
                        print("inf !!!")
                        sys.exit(0)

                    print("eval: bucket %d, loss %0.5f" %
                          (bucket_id, eval_loss))

                sys.stdout.flush()
                record_file.write("%d\t%.5f\t%.5f\n" %
                                  (model.global_step.eval(), loss, eval_loss))

                # Save checkpoint and zero timer and loss.
                if model.global_step.eval() % FLAGS.steps_per_predictpoint == 0:
                    checkpoint_path = os.path.join(FLAGS.model_dir,
                                                   "model.ckpt")
                    model.saver.save(sess,
                                     checkpoint_path,
                                     global_step=model.global_step)
                step_time, loss = 0.0, 0.0
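
Several of these loops decay the learning rate whenever the latest checkpoint loss is no better than the worst of the previous three. A framework-free sketch of that rule (the 0.99 decay factor is an assumption; the real factor lives inside the model's `learning_rate_decay_op`):

learning_rate = 0.5
decay_factor = 0.99  # assumed for illustration
previous_losses = []
for loss in [4.0, 3.5, 3.4, 3.6, 3.7, 3.2]:
    if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
        learning_rate *= decay_factor
        print("no improvement -> learning rate decayed to %.4f" % learning_rate)
    previous_losses.append(loss)
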
Example #6
def train(args):
    print("[%s] Preparing dialog data in %s" % (args.model_name, args.data_dir))
    setup_workpath(workspace=args.workspace)
    train_data, dev_data, _ = data_utils.prepare_dialog_data(args.data_dir, args.vocab_size)

    if args.reinforce_learn:
      args.batch_size = 1  # We decode one sentence at a time.

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_usage)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Create model.
        print("Creating %d layers of %d units." % (args.num_layers, args.size))
        model = seq2seq_model_utils.create_model(sess, args, forward_only=False)

        # Read data into buckets and compute their sizes.
        print("Reading development and training data (limit: %d)." % args.max_train_data_size)
        dev_set = data_utils.read_data(dev_data, args.buckets, reversed=args.rev_model)
        train_set = data_utils.read_data(train_data, args.buckets, args.max_train_data_size, reversed=args.rev_model)
        train_bucket_sizes = [len(train_set[b]) for b in xrange(len(args.buckets))]
        train_total_size = float(sum(train_bucket_sizes))

        # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
        # to select a bucket. The length of [scale[i], scale[i+1]] is proportional to
        # the size of the i-th training bucket, as used later.
        train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                               for i in xrange(len(train_bucket_sizes))]

        # This is the training loop.
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []

        # Load vocabularies.
        vocab_path = os.path.join(args.data_dir, "vocab%d.in" % args.vocab_size)
        vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

        while True:
          # Choose a bucket according to data distribution. We pick a random number
          # in [0, 1] and use the corresponding interval in train_buckets_scale.
          random_number_01 = np.random.random_sample()
          bucket_id = min([i for i in xrange(len(train_buckets_scale))
                           if train_buckets_scale[i] > random_number_01])

          # Get a batch and make a step.
          start_time = time.time()
          encoder_inputs, decoder_inputs, target_weights = model.get_batch(
              train_set, bucket_id)

          # print("[shape]", np.shape(encoder_inputs), np.shape(decoder_inputs), np.shape(target_weights))
          if args.reinforce_learn:
            _, step_loss, _ = model.step_rf(args, sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, rev_vocab=rev_vocab)
          else:
            _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, forward_only=False, force_dec_input=True)

          step_time += (time.time() - start_time) / args.steps_per_checkpoint
          loss += step_loss / args.steps_per_checkpoint
          current_step += 1

          # Once in a while, we save checkpoint, print statistics, and run evals.
          if (current_step % args.steps_per_checkpoint == 0) and (not args.reinforce_learn):
            # Print statistics for the previous epoch.
            perplexity = math.exp(loss) if loss < 300 else float('inf')
            print ("global step %d learning rate %.4f step-time %.2f perplexity %.2f @ %s" %
                   (model.global_step.eval(), model.learning_rate.eval(), step_time, perplexity, datetime.now()))

            # Decrease learning rate if no improvement was seen over last 3 times.
            if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
              sess.run(model.learning_rate_decay_op)

            previous_losses.append(loss)

            # Save checkpoint and zero timer and loss.
            checkpoint_path = os.path.join(args.model_dir, "model.ckpt")
            model.saver.save(sess, checkpoint_path, global_step=model.global_step)
            step_time, loss = 0.0, 0.0

            # Run evals on development set and print their perplexity.
            for bucket_id in xrange(len(args.buckets)):
              encoder_inputs, decoder_inputs, target_weights = model.get_batch(dev_set, bucket_id)
              _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs, 
                                          target_weights, bucket_id, forward_only=True, force_dec_input=False)

              eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
              print("  eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))

            sys.stdout.flush()
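
`train(args)` only reads attributes off `args`, so it is typically driven from an argparse namespace. A hedged sketch of what the corresponding flags could look like; the names mirror the attributes used above, but the defaults and bucket shapes are illustrative, not the project's:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="chatbot")
parser.add_argument("--data_dir", default="works/data")
parser.add_argument("--workspace", default="works")
parser.add_argument("--model_dir", default="works/model")
parser.add_argument("--vocab_size", type=int, default=20000)
parser.add_argument("--size", type=int, default=512)        # hidden units per layer
parser.add_argument("--num_layers", type=int, default=2)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--max_train_data_size", type=int, default=0)
parser.add_argument("--steps_per_checkpoint", type=int, default=200)
parser.add_argument("--gpu_usage", type=float, default=0.8)
parser.add_argument("--rev_model", action="store_true")
parser.add_argument("--reinforce_learn", action="store_true")
args = parser.parse_args()
args.buckets = [(10, 10), (20, 20), (40, 40)]  # assumed bucket shapes
# train(args)  # entry point defined above
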
Example #7
    def train(self):
        print ('---------prepare data---------')
        pos_id_path, neg_id_path, vocab_path = data_utils.prepare_diologue(self.works_dir, self.data_path, self.vocab_size)
        print ('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)
        print ('loading pos set from %s...' %pos_id_path)
        pos_set = data_utils.read_data(pos_id_path, 1.0)
        print ('loading neg set from %s...' %neg_id_path)
        neg_set = data_utils.read_data(neg_id_path, 0.0)

        print ('---------building model---------')
        self.sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(self.works_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        else:
            print ('Creating new parameters @ %s'  % (datetime.now()))

        pos_seq_loss, neg_seq_loss = 0.0, 0.0

        if self.pretrain:
            print ('--------start pretraining-------')
            for i in range(2000):
                batch_encoder_inputs, batch_target_weights = self.get_batch(pos_set, neg_set)
                pos_sequence_loss, neg_sequence_loss = self.pretrain_step(self.sess, batch_encoder_inputs, batch_target_weights)

                pos_seq_loss += pos_sequence_loss / self.checkpoint_step
                neg_seq_loss += neg_sequence_loss / self.checkpoint_step

                if (i+1) % self.checkpoint_step == 0:
                    print ("iter: ", i, "pos_perplexity %.4f neg_d_loss %.4f @ %s" %(math.exp(pos_seq_loss), math.exp(neg_seq_loss), datetime.now()))
                    pos_seq_loss, neg_seq_loss = 0.0, 0.0

        g_loss, pos_d_loss, neg_d_loss, pos_seq_loss, neg_seq_loss = 0.0, 0.0, 0.0, 0.0, 0.0
        current_step = 0

        print ('---------start training---------')
        while True:
            if current_step == 200000:
                break
            batch_encoder_inputs, batch_target_weights = self.get_batch(pos_set, neg_set)
            gen_loss, pos_adv_loss, neg_adv_loss, pos_sequence_loss, neg_sequence_loss = self.g_step(self.sess, batch_encoder_inputs, batch_target_weights, train=True)
            pos_dis_loss, neg_dis_loss, pos_adv_loss, neg_adv_loss = self.d_step(self.sess, batch_encoder_inputs, train=True)

            g_loss += gen_loss / self.checkpoint_step
            pos_d_loss += pos_dis_loss / self.checkpoint_step
            neg_d_loss += neg_dis_loss / self.checkpoint_step
            pos_seq_loss += pos_sequence_loss / self.checkpoint_step
            neg_seq_loss += neg_sequence_loss / self.checkpoint_step

            # print (current_step)
            current_step += 1
            if current_step % self.checkpoint_step == 0:
                print ('global step', self.sess.run(self.global_step), end='')
                print (" g_loss %.4f pos_d_loss %.4f neg_d_loss %.4f " %(g_loss, pos_d_loss, neg_d_loss), end='')
                print ("pos_perplexity %.4f neg_d_loss %.4f @ %s" %(math.exp(pos_seq_loss), math.exp(neg_seq_loss), datetime.now()))
                g_loss, pos_d_loss, neg_d_loss, pos_seq_loss, neg_seq_loss = 0.0, 0.0, 0.0, 0.0, 0.0

            if current_step % (5*self.checkpoint_step) == 0:
                checkpoint_path = os.path.join(self.works_dir, "model.ckpt")
                self.saver.save(self.sess, checkpoint_path)
                true_pos_id, true_neg_id, fake_pos_id, fake_neg_id = self.get_sentence(self.sess, batch_encoder_inputs)
                print ('fake_pos:', self.print_sentence(fake_pos_id, rev_vocab, 0))
                print ('fake_neg:', self.print_sentence(fake_neg_id, rev_vocab, 0))
                print ('true_pos:', self.print_sentence(true_pos_id, rev_vocab, 0))
                print ('true_neg:', self.print_sentence(true_neg_id, rev_vocab, 0))
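
Every loop in this section accumulates `step_loss / checkpoint_step` and resets the accumulator at each checkpoint, so the running sum is exactly the mean step loss over that interval. A tiny demonstration of the equivalence with fabricated losses:

checkpoint_step = 4
step_losses = [2.0, 1.5, 1.0, 0.5, 0.4, 0.3, 0.2, 0.1]
acc = 0.0
for i, step_loss in enumerate(step_losses, start=1):
    acc += step_loss / checkpoint_step
    if i % checkpoint_step == 0:
        window = step_losses[i - checkpoint_step:i]
        print("checkpoint at step %d: accumulated %.3f, mean %.3f" % (i, acc, sum(window) / checkpoint_step))
        acc = 0.0
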