Code example #1
# These excerpts assume TensorFlow 1.x plus the surrounding module's imports
# (sys, os, math, re, datetime.datetime), the project's data_utils module, the
# v_autoencoder model class, and helpers such as get_sentence / print_sentence.
def chat(args):
    with tf.Session() as sess:
        _, _, vocab_path = data_utils.prepare_diologue(args.works_dir, args.vocab_size)
        print()
        print('-------loading model-------')
        args.batch_size = 1  # decode one sentence at a time
        model = v_autoencoder(sess, args, feed_previous=True)
        sess.run(tf.global_variables_initializer())
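        # Restore the transfer-network, classifier, and autoencoder weights in turn.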
        ckpt = tf.train.get_checkpoint_state(args.cyc_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.trans_saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        ckpt = tf.train.get_checkpoint_state(args.class_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.class_saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        ckpt = tf.train.get_checkpoint_state(args.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))

        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)

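        # Interactive loop: read sentences from stdin until EOF and decode each one.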
        sys.stdout.write("Input:     ")
        sys.stdout.flush()
        sentence = sys.stdin.readline()

        while sentence:
            get_sentence(sess, sentence, vocab, rev_vocab, model)
            sys.stdout.write("Input:     ")
            sys.stdout.flush()
            sentence = sys.stdin.readline()
Code example #2
    def test(self):
        print('---------prepare data---------')
        data_id_path, vocab_path = data_utils.prepare_test_data(self.works_dir, self.data_path, self.vocab_size)
        print('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)
        self.sess.run(tf.global_variables_initializer())
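        # Restore trained weights; testing without a checkpoint is aborted below.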
        ckpt = tf.train.get_checkpoint_state(self.works_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        else:
            raise ValueError("no checkpoint found in %s" % self.works_dir)

        print('loading data set from %s...' % data_id_path)
        data_set = data_utils.read_data(data_id_path, 1.0)
        # Use a context manager so the output file is closed when the loop ends.
        with open(os.path.join(self.works_dir, 'seq2seq_pos.txt'), 'w') as fout:
            for batch_encoder_inputs, _, i in self.get_batch_epoch(data_set, data_set):
                _, _, fake_pos_id, _ = self.get_sentence(self.sess, batch_encoder_inputs)
                # The final batch may be short when len(data_set) is not a
                # multiple of batch_size.
                data_size = self.batch_size
                if i + self.batch_size > len(data_set):
                    data_size = len(data_set) - i
                print('\r', data_size, i, end='')

                for t in range(data_size):
                    fout.write(self.print_sentence(fake_pos_id, rev_vocab, t) + '\n')
Code example #3
def step2(args): # training sentiment classifier
    pos_id_path, neg_id_path, vocab_path = data_utils.prepare_cyc_diologue(args.works_dir, args.vocab_size)

    checkpoint_step = 200
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        print()
        print('-------loading model-------')
        model = v_autoencoder(sess, args, feed_previous=True)
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(args.class_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.class_saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))

        ckpt = tf.train.get_checkpoint_state(args.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))

        print('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)
        print('loading positive set from %s...' % pos_id_path)
        pos_set = data_utils.read_cyc_data(pos_id_path)
        print('loading negative set from %s...' % neg_id_path)
        neg_set = data_utils.read_cyc_data(neg_id_path)
        # Hold out the last 30000 examples of each polarity for evaluation.
        test_pos = pos_set[-30000:]
        test_neg = neg_set[-30000:]
        train_pos = pos_set[:-30000]
        train_neg = neg_set[:-30000]
        print('-------start training-------')
        
        acc = 0.0
        current_step = 0
        
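        # Train indefinitely; checkpoint and evaluate every checkpoint_step batches.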
        while True:
            encoder_inputs, decoder_inputs, masks, labels = model.class_get_batch(train_pos, train_neg)
            step_acc, _ = model.class_step(sess, encoder_inputs, decoder_inputs, masks, labels, False)
            # sess.run(model.class_rate_op)
            acc += step_acc / checkpoint_step
            current_step += 1

            if current_step % checkpoint_step == 0:
                print("global step %d acc %.4f @ %s" % (model.class_global_step.eval(), acc, datetime.now()))

                checkpoint_path = os.path.join(args.class_dir, "model.ckpt")
                model.class_saver.save(sess, checkpoint_path)

                # Reset the running accuracy and evaluate on a held-out batch.
                acc = 0.0
                encoder_inputs, decoder_inputs, masks, labels = model.class_get_batch(test_pos, test_neg)
                step_acc, _ = model.class_step(sess, encoder_inputs, decoder_inputs, masks, labels, True)
                print("  eval: acc %.4f" % step_acc)
                sys.stdout.flush()
Code example #4
def step1(args): # training variational autoencoder
    train_id_path, dev_id_path, vocab_path = data_utils.prepare_diologue(args.works_dir, args.vocab_size)

    checkpoint_step = 200
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        print()
        print('-------loading model-------')
        model = v_autoencoder(sess, args, feed_previous=False)
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(args.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        else:
            print("Created model with fresh parameters.")

        print('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)

        print('loading dev set from %s...' % dev_id_path)
        dev_set = data_utils.read_data(dev_id_path)
        print('loading train set from %s...' % train_id_path)
        train_set = data_utils.read_data(train_id_path)
        print('-------start training-------')
        seq_loss, kl_loss = 0.0, 0.0
        current_step = 0

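        # Train until interrupted; kl_weight_op and sample_rate_op presumably
        # anneal the KL-term weight and the scheduled-sampling rate each step.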
        while True:
            encoder_inputs, decoder_inputs, weights = model.train_get_batch(train_set)
            step_seq_loss, step_kl_loss = model.ae_step(sess, encoder_inputs, decoder_inputs, weights, False)
            sess.run(model.kl_weight_op)
            sess.run(model.sample_rate_op)
            seq_loss += step_seq_loss / checkpoint_step
            kl_loss  += step_kl_loss / checkpoint_step
            current_step += 1
            if current_step % checkpoint_step == 0:
                # exp of the mean cross-entropy gives the training perplexity.
                print("global step %d perplexity %.4f kl_loss %.4f @ %s" % (model.global_step.eval(), math.exp(seq_loss), kl_loss, datetime.now()))

                checkpoint_path = os.path.join(args.model_dir, "model.ckpt")
                model.saver.save(sess, checkpoint_path)
                seq_loss, kl_loss = 0.0, 0.0
                encoder_inputs, decoder_inputs, weights = model.train_get_batch(dev_set)
                step_seq_loss, step_kl_loss = model.ae_step(sess, encoder_inputs, decoder_inputs, weights, True)
                print("  eval: perplexity %.2f" % math.exp(step_seq_loss))
                sys.stdout.flush()
Code example #5
def test(args):
    # Both prepare calls are presumably kept for their file-preparation side
    # effects; only vocab_path is used in this function.
    train_id_path, dev_id_path, vocab_path = data_utils.prepare_diologue(
        args.works_dir, args.vocab_size)
    pos_id_path, neg_id_path, vocab_path = data_utils.prepare_cyc_diologue(
        args.works_dir, args.vocab_size)

    args.batch_size = 1  # decode one sentence at a time
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        print()
        print('-------loading model-------')
        model = v_autoencoder(sess, args, feed_previous=True)
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(args.cyc_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" %
                  (ckpt.model_checkpoint_path, datetime.now()))
            model.trans_saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        ckpt = tf.train.get_checkpoint_state(args.class_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" %
                  (ckpt.model_checkpoint_path, datetime.now()))
            model.class_saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        ckpt = tf.train.get_checkpoint_state(args.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" %
                  (ckpt.model_checkpoint_path, datetime.now()))
            model.saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))

        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)

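        # Transfer every line of seq2seq.txt, writing one output sentence per line.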
        with open(os.path.join(args.works_dir, 'seq2seq.txt'), 'r') as fin:
            with open(os.path.join(args.works_dir, 'neg2pos.txt'),
                      'w') as fout:
                for l, seq in enumerate(fin):
                    print(('\r%d' % l), end='')
                    sentence = seq.strip()
                    out = get_sentence(sess, sentence, vocab, rev_vocab, model)
                    fout.write(out + '\n')
Code example #6
    def chat(self):
        print('---------prepare data---------')
        _, _, vocab_path = data_utils.prepare_diologue(self.works_dir, self.data_path, self.vocab_size)
        print('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)
        print('---------building model---------')
        self.sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(self.works_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        else:
            raise ValueError("no checkpoint found in %s" % self.works_dir)

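        # Interactive loop: print the fake_pos and true_neg decodings for each input.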
        sentence = input('Input: ')
        pat = re.compile(r'(\W+)')  # raw string avoids Python 3's invalid-escape warning

        while sentence:
            true_neg_id, fake_pos_id = self.test_sentence(self.sess, sentence, vocab, rev_vocab, pat)
            print('fake_pos:', self.print_sentence(fake_pos_id, rev_vocab, 0))
            print('true_neg:', self.print_sentence(true_neg_id, rev_vocab, 0))
            sentence = input('Input: ')
Code example #7
def step3(args): # training transfer network
    pos_id_path, neg_id_path, vocab_path = data_utils.prepare_cyc_diologue(args.works_dir, args.vocab_size)
    checkpoint_step = 100
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        print()
        print('-------loading model-------')
        model = v_autoencoder(sess, args, feed_previous=True)
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(args.cyc_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.trans_saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        ckpt = tf.train.get_checkpoint_state(args.class_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.class_saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        ckpt = tf.train.get_checkpoint_state(args.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            model.saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))

        print('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)
        print('loading positive set from %s...' % pos_id_path)
        pos_set = data_utils.read_cyc_data(pos_id_path)
        print('loading negative set from %s...' % neg_id_path)
        neg_set = data_utils.read_cyc_data(neg_id_path)
        print('-------start training-------')

        style, content = 0.0, 0.0
        current_step = 0

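        # Train indefinitely on mixed positive/negative batches; the 0.7 argument
        # to class_get_batch is presumably a mixing ratio.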
        while True:
            encoder_inputs, decoder_inputs, _, _ = model.class_get_batch(pos_set, neg_set, 0.7)
            step_style, step_content = model.trans_step(sess, encoder_inputs, decoder_inputs, False)
            print('\rstep: %d --style %.3f --content %.3f' % (current_step, step_style, step_content), end='')
            sess.run(model.trans_weight_op)
            style += step_style / checkpoint_step
            content += step_content / checkpoint_step
            current_step += 1

            if current_step % checkpoint_step == 0:
                print()
                print("global step %d style %.3f content %.3f @ %s" % (model.trans_global_step.eval(), style, content, datetime.now()))

                checkpoint_path = os.path.join(args.cyc_dir, "model.ckpt")
                model.trans_saver.save(sess, checkpoint_path)
                style, content = 0.0, 0.0

                # Evaluate on a fresh batch and print one decoded example pair.
                encoder_inputs, decoder_inputs, _, _ = model.class_get_batch(pos_set, neg_set, 1.0)
                step_style, step_content = model.trans_step(sess, encoder_inputs, decoder_inputs, True)
                outputs, trans_outputs = model.trans_chat(sess, encoder_inputs, decoder_inputs)
                print('score: ', step_style)
                print('example --')
                print('  outputs       : ', print_sentence(outputs, rev_vocab, 0))
                print('  trans_outputs : ', print_sentence(trans_outputs, rev_vocab, 0))
Code example #8
    def train(self):
        print('---------prepare data---------')
        pos_id_path, neg_id_path, vocab_path = data_utils.prepare_diologue(self.works_dir, self.data_path, self.vocab_size)
        print('loading dictionary...')
        vocab, rev_vocab = data_utils.initialize_vocab(vocab_path)
        print('loading pos set from %s...' % pos_id_path)
        pos_set = data_utils.read_data(pos_id_path, 1.0)
        print('loading neg set from %s...' % neg_id_path)
        neg_set = data_utils.read_data(neg_id_path, 0.0)

        print('---------building model---------')
        self.sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(self.works_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Reading model parameters from %s @ %s" % (ckpt.model_checkpoint_path, datetime.now()))
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("Model reloaded @ %s" % (datetime.now()))
        else:
            print('Creating new parameters @ %s' % datetime.now())

        pos_seq_loss, neg_seq_loss = 0.0, 0.0

        if self.pretrain:
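            # Optional warm-up phase before the adversarial loop.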
            print('--------start pretraining-------')
            for i in range(2000):
                batch_encoder_inputs, batch_target_weights = self.get_batch(pos_set, neg_set)
                pos_sequence_loss, neg_sequence_loss = self.pretrain_step(self.sess, batch_encoder_inputs, batch_target_weights)

                pos_seq_loss += pos_sequence_loss / self.checkpoint_step
                neg_seq_loss += neg_sequence_loss / self.checkpoint_step

                if (i+1) % self.checkpoint_step == 0:
                    print("iter: ", i, "pos_perplexity %.4f neg_perplexity %.4f @ %s" % (math.exp(pos_seq_loss), math.exp(neg_seq_loss), datetime.now()))
                    pos_seq_loss, neg_seq_loss = 0.0, 0.0

        g_loss, pos_d_loss, neg_d_loss, pos_seq_loss, neg_seq_loss = 0.0, 0.0, 0.0, 0.0, 0.0
        current_step = 0

        print('---------start training---------')
        while True:
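            # Adversarial loop: alternate a generator update (g_step) and a
            # discriminator update (d_step), stopping after 200000 steps.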
            if current_step == 200000:
                break
            batch_encoder_inputs, batch_target_weights = self.get_batch(pos_set, neg_set)
            gen_loss, pos_adv_loss, neg_adv_loss, pos_sequence_loss, neg_sequence_loss = self.g_step(self.sess, batch_encoder_inputs, batch_target_weights, train=True)
            pos_dis_loss, neg_dis_loss, pos_adv_loss, neg_adv_loss = self.d_step(self.sess, batch_encoder_inputs, train=True)

            g_loss += gen_loss / self.checkpoint_step
            pos_d_loss += pos_dis_loss / self.checkpoint_step
            neg_d_loss += neg_dis_loss / self.checkpoint_step
            pos_seq_loss += pos_sequence_loss / self.checkpoint_step
            neg_seq_loss += neg_sequence_loss / self.checkpoint_step

            current_step += 1
            if current_step % self.checkpoint_step == 0:
                print('global step', self.sess.run(self.global_step), end='')
                print(" g_loss %.4f pos_d_loss %.4f neg_d_loss %.4f " % (g_loss, pos_d_loss, neg_d_loss), end='')
                print("pos_perplexity %.4f neg_perplexity %.4f @ %s" % (math.exp(pos_seq_loss), math.exp(neg_seq_loss), datetime.now()))
                g_loss, pos_d_loss, neg_d_loss, pos_seq_loss, neg_seq_loss = 0.0, 0.0, 0.0, 0.0, 0.0

            if current_step % (5*self.checkpoint_step) == 0:
                checkpoint_path = os.path.join(self.works_dir, "model.ckpt")
                self.saver.save(self.sess, checkpoint_path)
                # Decode one example of each kind to monitor transfer quality.
                true_pos_id, true_neg_id, fake_pos_id, fake_neg_id = self.get_sentence(self.sess, batch_encoder_inputs)
                print('fake_pos:', self.print_sentence(fake_pos_id, rev_vocab, 0))
                print('fake_neg:', self.print_sentence(fake_neg_id, rev_vocab, 0))
                print('true_pos:', self.print_sentence(true_pos_id, rev_vocab, 0))
                print('true_neg:', self.print_sentence(true_neg_id, rev_vocab, 0))
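
Taken together, the standalone functions above (step1, step2, step3, chat, test) form a staged pipeline: step1 trains the variational autoencoder, step2 the sentiment classifier, and step3 the transfer network, after which chat or test exercises the result. Below is a minimal driver sketch for invoking them; the flag names and defaults are assumptions inferred from the attributes the functions read (works_dir, vocab_size, batch_size, model_dir, class_dir, cyc_dir), not the project's actual command-line interface.

import argparse

def main():
    # Hypothetical CLI; every default below is illustrative, not from the source.
    parser = argparse.ArgumentParser()
    parser.add_argument('--works_dir', default='works')
    parser.add_argument('--model_dir', default='model_ckpt')  # autoencoder checkpoints
    parser.add_argument('--class_dir', default='class_ckpt')  # classifier checkpoints
    parser.add_argument('--cyc_dir', default='cyc_ckpt')      # transfer-network checkpoints
    parser.add_argument('--vocab_size', type=int, default=40000)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--mode', default='step1',
                        choices=['step1', 'step2', 'step3', 'chat', 'test'])
    args = parser.parse_args()

    # Dispatch to the stage functions above; each opens its own tf.Session.
    stages = {'step1': step1, 'step2': step2, 'step3': step3,
              'chat': chat, 'test': test}
    stages[args.mode](args)

if __name__ == '__main__':
    main()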