Example 1
    def __init__(self, args, is_training=True):
        self.args = args

        if not is_training:
            args.batch_size = 1
            args.seq_length = 1

        if args.model == 'rnn':
            self.cell = rnn_cell.BasicRNNCell(args.rnn_size)
        elif args.model == 'gru':
            self.cell = rnn_cell.GRUCell(args.rnn_size)
        elif args.model == 'lstm':
            self.cell = rnn_cell.BasicLSTMCell(args.rnn_size)
        else:
            raise Exception('model type not supported: {}'.format(args.model))

        self.cell = rnn_cell.MultiRNNCell([self.cell] * args.num_layers)

        self.input_data    = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
        self.targets       = tf.placeholder(tf.int32, [args.batch_size, args.seq_length]) # Target replication
        self.initial_state = self.cell.zero_state(args.batch_size, tf.float32)

        with tf.variable_scope('rnn'):
            softmax_w = tf.get_variable('softmax_w', [args.rnn_size, 2])
            softmax_b = tf.get_variable('softmax_b', [2])

            with tf.device('/cpu:0'):
                embedding = tf.get_variable('embedding', [args.vocab_size, args.rnn_size])
                inputs    = tf.split(1, args.seq_length, tf.nn.embedding_lookup(embedding, self.input_data))
                inputs    = [tf.squeeze(i, [1]) for i in inputs]

            outputs, last_state = seq2seq.rnn_decoder(inputs, self.initial_state, 
                self.cell, loop_function=None)

        output_tf   = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
        self.logits = tf.nn.xw_plus_b(output_tf, softmax_w, softmax_b)
        self.probs  = tf.nn.softmax(self.logits)
        
        loss = seq2seq.sequence_loss_by_example(
            [self.logits],
            [tf.reshape(self.targets, [-1])],
            [tf.ones([args.batch_size * args.seq_length])])

        self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length

        self.final_state = last_state
        self.lr          = tf.Variable(0.0, trainable = False)
        tvars            = tf.trainable_variables()
        grads, _         = tf.clip_by_global_norm(tf.gradients(self.cost, tvars, aggregation_method=2), args.grad_clip)
        optimizer        = tf.train.AdamOptimizer(self.lr)
        self.train_op    = optimizer.apply_gradients(zip(grads, tvars))
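
A minimal training-step sketch for the model above, using the session API of the same TF 0.x era as the example. The `Model` wrapper name, the `args` namespace, and the random batches are assumptions for illustration, not part of the original code.

    import numpy as np
    import tensorflow as tf

    model = Model(args, is_training=True)          # hypothetical class around the __init__ above
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())    # TF 0.x-era initializer
        sess.run(tf.assign(model.lr, 0.002))       # lr is a plain, non-trainable Variable
        x = np.random.randint(0, args.vocab_size, (args.batch_size, args.seq_length))
        y = np.random.randint(0, 2, (args.batch_size, args.seq_length))  # binary targets
        cost, _ = sess.run([model.cost, model.train_op],
                           {model.input_data: x, model.targets: y})
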
Example 2
    def __init__(self,
                 sess,
                 vocab_size,
                 cell_size,
                 embedding_size,
                 num_layer,
                 memory_size,
                 log_dir,
                 learning_rate=0.001,
                 momentum=0.9,
                 learning_rate_decay_factor=0.85,
                 use_dropout=True,
                 l2_coef=1e-6):

        with tf.name_scope("io"):
            self.inputs = tf.placeholder(dtype=tf.int32,
                                         shape=(None, None),
                                         name="prev_words")
            self.input_lens = tf.placeholder(dtype=tf.int32,
                                             shape=(None, ),
                                             name="sent_len")
            self.labels = tf.placeholder(dtype=tf.int32,
                                         shape=(None, None),
                                         name="next_words")
            self.keep_prob = tf.placeholder(dtype=tf.float32, name="keep_prob")
            self.learning_rate = tf.Variable(float(learning_rate),
                                             trainable=False)
            self.learning_rate_decay_op = self.learning_rate.assign(
                self.learning_rate * learning_rate_decay_factor)

        max_sent_len = array_ops.shape(self.labels)[1]
        with variable_scope.variable_scope("word-embedding"):
            embedding = tf.get_variable("embedding",
                                        [vocab_size, embedding_size],
                                        dtype=tf.float32)
            input_embedding = embedding_ops.embedding_lookup(
                embedding,
                tf.squeeze(tf.reshape(self.inputs, [-1, 1]), squeeze_dims=[1]))

            input_embedding = tf.reshape(input_embedding,
                                         [-1, max_sent_len, embedding_size])

        with variable_scope.variable_scope("rnn"):
            # cell = tf_helpers.MemoryGRUCell(cell_size, memory_size, attn_size=100)
            cell = rnn_cell.BasicLSTMCell(cell_size)

            if use_dropout:
                cell = rnn_cell.DropoutWrapper(cell,
                                               output_keep_prob=self.keep_prob,
                                               input_keep_prob=self.keep_prob)

            if num_layer > 1:
                cell = rnn_cell.MultiRNNCell([cell] * num_layer,
                                             state_is_tuple=True)

            # add output projection
            cell = tf.nn.rnn_cell.OutputProjectionWrapper(cell, vocab_size)

            # with sequence_length set, the returned state is each sequence's true last state
            self.logits, last_state = tf.nn.dynamic_rnn(
                cell,
                input_embedding,
                dtype=tf.float32,
                sequence_length=self.input_lens,
            )
        self.loss = self.sequence_loss()
        tf.scalar_summary("entropy_loss", self.loss)
        tf.scalar_summary("perplexity", tf.exp(self.loss))
        self.summary_op = tf.merge_all_summaries()

        # weight decay (disabled):
        # if l2_coef > 0.0:
        #     all_weights = []
        #     for v in tf.trainable_variables():
        #         if "bias" not in v.name.lower():
        #             all_weights.append(tf.nn.l2_loss(v))
        #             print("adding l2 to %s" % v.name)
        #     loss_l2 = tf.add_n(all_weights)
        #     self.reg_loss = self.loss + l2_coef * loss_l2
        # else:
        #     self.reg_loss = self.loss

        # optimization
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), 5.0)
        optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.train_ops = optimizer.apply_gradients(zip(grads, tvars))

        train_log_dir = os.path.join(log_dir, "train")
        valid_log_dir = os.path.join(log_dir, "valid")
        print "Save summary to %s" % log_dir
        self.train_summary_writer = tf.train.SummaryWriter(
            train_log_dir, sess.graph)
        self.valid_summary_writer = tf.train.SummaryWriter(
            valid_log_dir, sess.graph)
        self.saver = tf.train.Saver(tf.all_variables())
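
Note that `learning_rate_decay_op` is a bare `assign` that nothing in the graph runs automatically; the caller triggers it. A hedged sketch of one training step plus decay, assuming a hypothetical `model` instance of this class and externally tracked validation losses:

    # decay when validation loss stops improving (illustrative loop state)
    if valid_loss >= prev_valid_loss:
        sess.run(model.learning_rate_decay_op)     # lr *= learning_rate_decay_factor
    feed = {model.inputs: x, model.input_lens: lens,
            model.labels: y, model.keep_prob: 0.5}
    loss, summary, _ = sess.run([model.loss, model.summary_op, model.train_ops], feed)
    model.train_summary_writer.add_summary(summary, step)
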
Example 3
	def __init__(self, args, is_training=True):

		if not is_training:
			seq_length = 1
		else:
			seq_length = args.seq_length

		if args.model == 'rnn':
			cell_gen = rnn_cell.BasicRNNCell(args.rnn_size)
			cell_dis = rnn_cell.BasicRNNCell(args.rnn_size)
		elif args.model == 'gru':
			cell_gen = rnn_cell.GRUCell(args.rnn_size)
			cell_dis = rnn_cell.GRUCell(args.rnn_size)
		elif args.model == 'lstm':
			cell_gen = rnn_cell.BasicLSTMCell(args.rnn_size)
			cell_dis = rnn_cell.BasicLSTMCell(args.rnn_size)
		else:
			raise Exception('model type not supported: {}'.format(args.model))

		# Pass the generated sequences and targets (1)
		with tf.name_scope('input'):
			with tf.name_scope('data'):
				self.input_data  = tf.placeholder(tf.int32, [args.batch_size, seq_length])
			with tf.name_scope('targets'):
				self.targets     = tf.placeholder(tf.int32, [args.batch_size, seq_length])

		############
		# Generator
		############
		with tf.variable_scope('generator'):
			self.cell_gen = rnn_cell.MultiRNNCell([cell_gen] * args.num_layers)
			self.initial_state_gen = self.cell_gen.zero_state(args.batch_size, tf.float32)	

			with tf.variable_scope('rnn'):
				softmax_w = tf.get_variable('softmax_w', [args.rnn_size, args.vocab_size])
				softmax_b = tf.get_variable('softmax_b', [args.vocab_size])
				
				with tf.device('/cpu:0'):
					embedding  = tf.get_variable('embedding', [args.vocab_size, args.rnn_size])
					inputs_gen = tf.split(1, seq_length, tf.nn.embedding_lookup(
						embedding, self.input_data))
					inputs_gen = [tf.squeeze(i, [1]) for i in inputs_gen]

			outputs_gen, last_state_gen = seq2seq.rnn_decoder(inputs_gen, self.initial_state_gen, 
				self.cell_gen, loop_function=None)
			
			self.logits_sequence = []
			for output_gen in outputs_gen:
				logits_gen  = tf.nn.xw_plus_b(output_gen, softmax_w, softmax_b)
				self.logits_sequence.append(logits_gen)

			self.final_state_gen = last_state_gen

		################
		# Discriminator
		################
		with tf.variable_scope('discriminator'):
			self.cell_dis = rnn_cell.MultiRNNCell([cell_dis] * args.num_layers)
			self.initial_state_dis = self.cell_dis.zero_state(args.batch_size, tf.float32)

			with tf.variable_scope('rnn'):
				softmax_w = tf.get_variable('softmax_w', [args.rnn_size, 2])
				softmax_b = tf.get_variable('softmax_b', [2])

				inputs_dis = []
				embedding  = tf.get_variable('embedding', [args.vocab_size, args.rnn_size])
				for logit in self.logits_sequence:
					inputs_dis.append(tf.matmul(logit, embedding))
					# inputs_dis.append(tf.matmul(tf.nn.softmax(logit), embedding))
					
				outputs_dis, last_state_dis = seq2seq.rnn_decoder(inputs_dis,
					self.initial_state_dis, self.cell_dis, loop_function=None)

			probs, logits = [], []
			for output_dis in outputs_dis:
				logit = tf.nn.xw_plus_b(output_dis, softmax_w, softmax_b)
				prob = tf.nn.softmax(logit)
				logits.append(logit)
				probs.append(prob)

			with tf.name_scope('summary'):
				probs      = tf.pack(probs)
				probs_real = tf.slice(probs, [0,0,1], [seq_length, args.batch_size, 1])  # local seq_length, so sampling (seq_length=1) also works
				variable_summaries(probs_real, 'probability of real')

			self.final_state_dis = last_state_dis

		#########
		# Train
		#########
		with tf.name_scope('train'):
			gen_loss = seq2seq.sequence_loss_by_example(
				logits,
				tf.unpack(tf.transpose(self.targets)), 
				tf.unpack(tf.transpose(tf.ones_like(self.targets, dtype=tf.float32))))

			self.gen_cost = tf.reduce_sum(gen_loss) / args.batch_size
			tf.scalar_summary('training loss', self.gen_cost)
			self.lr_gen = tf.Variable(0.0, trainable = False)		
			self.tvars 	= tf.trainable_variables()
			gen_vars    = [v for v in self.tvars if not v.name.startswith("discriminator/")]

			if is_training:
				gen_grads            = tf.gradients(self.gen_cost, gen_vars)
				self.all_grads       = tf.gradients(self.gen_cost, self.tvars)
				gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads, args.grad_clip)
				gen_optimizer        = tf.train.AdamOptimizer(self.lr_gen)
				self.gen_train_op    = gen_optimizer.apply_gradients(
											zip(gen_grads_clipped, gen_vars))				

		with tf.name_scope('summary'):
			with tf.name_scope('weight_summary'):
				for v in self.tvars:
					variable_summaries(v, v.op.name)
			if is_training:
				with tf.name_scope('grad_summary'):
					for var, grad in zip(self.tvars, self.all_grads):
						variable_summaries(grad, 'grad/' + var.op.name)

		self.merged = tf.merge_all_summaries()
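
Only the generator update (`gen_train_op`) is built above; the discriminator has no optimizer yet. A sketch of a single generator step, where the `GANModel` wrapper name, the session, and the random batches are assumptions:

    model = GANModel(args, is_training=True)       # hypothetical class around the __init__ above
    sess.run(tf.assign(model.lr_gen, 1e-3))        # lr_gen is assigned from outside, as in Example 1
    x = np.random.randint(0, args.vocab_size, (args.batch_size, args.seq_length))
    y = np.random.randint(0, args.vocab_size, (args.batch_size, args.seq_length))
    summary, cost, _ = sess.run([model.merged, model.gen_cost, model.gen_train_op],
                                {model.input_data: x, model.targets: y})
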
Example 4
    def __init__(self, embed_size, lstm_size, vocab_size,
                 batch_size, seq_length, learn_rate,
                 keep_prob=1.0, num_layers=2, name='char_lstm'):
        '''
        Initialize a character-level multilayer LSTM language model.
        Arguments:
            @embed_size: dimensions of embedding space
            @lstm_size:  number of hidden units in each LSTM layer
            @vocab_size: number of things in vocabulary (characters!)
            @batch_size: sequences per training batch
            @seq_length: length of sequences in each training batch
            @learn_rate: AdamOptimizer step size
            @keep_prob:  1 - dropout probability
            @num_layers: number of LSTM cells to stack
            @name:       identifier for this model instance
        '''
        # store params
        self.embed_size, self.lstm_size = embed_size, lstm_size
        self.vocab_size, self.seq_length = vocab_size, seq_length
        self.batch_size, self.learn_rate = batch_size, learn_rate
        self.kp, self.num_layers = keep_prob, num_layers
        self.name = name

        # Placeholders for input/output and dropout
        self.train_inputs = tf.placeholder(tf.int32,
                                           shape=[batch_size, seq_length])
        self.train_targets = tf.placeholder(tf.int32,
                                            shape=[batch_size, seq_length])
        self.sample_inputs = tf.placeholder(tf.int32, shape=[1, 1])

        # Set up embeddings
        E = weight('embedding', [vocab_size, embed_size])
        train_embeddings = tf.nn.embedding_lookup(E,
                                                  self.train_inputs,
                                                  name='train_embeddings')
        dropped_train_embeddings = (tf.nn.dropout(train_embeddings, self.kp)
                                    if self.kp < 1.0 else train_embeddings)
        sample_embeddings = tf.nn.embedding_lookup(E,
                                                   self.sample_inputs,
                                                   name='sample_embeddings')

        # Set up 2-layer LSTM
        # Use dynamic_rnn to run the cells
        with tf.variable_scope('lstm') as scope:
            single_cell = rnn_cell.BasicLSTMCell(lstm_size)
            self.cell = rnn_cell.MultiRNNCell([single_cell] * num_layers,
                                              state_is_tuple=True)
            self.train_init_state = self.cell.zero_state(
                batch_size, tf.float32)
            self.sample_init_state = self.cell.zero_state(1, tf.float32)
            train_outputs, self.train_state = dynamic_rnn(
                self.cell,
                dropped_train_embeddings,
                initial_state=self.train_init_state)
            scope.reuse_variables()
            sample_outputs, self.sample_state = dynamic_rnn(
                self.cell,
                sample_embeddings,
                initial_state=self.sample_init_state)

        reshaped_train_outputs = tf.reshape(
            train_outputs, (batch_size * seq_length, lstm_size))
        reshaped_sample_outputs = tf.reshape(sample_outputs, (1, lstm_size))

        # final feedforward layer (model logits)
        with tf.variable_scope('ff') as scope:
            ff_weights = weight('ff_weights', (lstm_size, vocab_size))
            ff_biases = weight('ff_biases', (vocab_size, ))
            self.ls = train_logits = tf.add(
                ff_biases, tf.matmul(reshaped_train_outputs, ff_weights))
            scope.reuse_variables()
            sample_logits = tf.add(
                ff_biases, tf.matmul(reshaped_sample_outputs, ff_weights))
        self.probs = softmax(sample_logits)

        # softmax and loss for training
        log_perps = tf.nn.seq2seq.sequence_loss_by_example(
            [train_logits], [tf.reshape(self.train_targets, [-1])],
            [tf.ones([batch_size * seq_length])])
        self.loss = tf.reduce_mean(log_perps)  # reduce_mean already averages over batch * seq_length

        # define trainer, saver, inits
        self.train_op = tf.train.AdamOptimizer(learn_rate).minimize(self.loss)
        self.init_op = tf.initialize_all_variables()
        self.saver = tf.train.Saver()
Example 5
    def __init__(self,
                 sequence_length,
                 vocab_size,
                 embedding_size,
                 hidden_size,
                 layer_count=1,
                 **kw):
        assert layer_count >= 1, "An LSTM cannot have fewer than one layer."
        n_classes = kw.get('n_classes', 2)  # >2 not tested.
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length],
                                      name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, n_classes],
                                      name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name="dropout_keep_prob")

        # Layer 1: Word embeddings
        self.embeddings = tf.Variable(tf.random_uniform(
            [vocab_size, embedding_size], -0.1, 0.1),
                                      name="embeddings")
        embedded_words = tf.nn.embedding_lookup(self.embeddings, self.input_x)

        # Funnel the words into the LSTM.
        # Current size: (batch_size, n_words, emb_dim)
        # Want:         [(batch_size, n_hidden) * n_words]
        #
        # Since otherwise there's no way to feed information into the LSTM cell.
        # Yes, it's a bit confusing, because we want a batch of multiple
        # sequences, with each step being of 'embedding_size'.
        embedded_words = tf.transpose(embedded_words, [1, 0, 2])
        embedded_words = tf.reshape(embedded_words, [-1, embedding_size])
        # Note: 'tf.split' outputs a **Python** list.
        embedded_words = tf.split(0, sequence_length, embedded_words)

        # Layer 2: LSTM cell
        lstm_use_peepholes = True
        # 'state_is_tuple = True' should NOT be used despite the warnings
        # (which appear as of TF 0.9), since it doesn't work on the version of
        # TF installed on Euler (0.8).
        if layer_count > 1:
            print("Using deep {0}-layer LSTM with first layer size {1}"
                  " (embedding size) and hidden layer size {2}.".format(
                      layer_count, embedding_size, hidden_size))
            print("First cell {0}->{1}".format(embedding_size, embedding_size))
            first_cell = TextLSTM._cell(embedding_size, embedding_size,
                                        lstm_use_peepholes,
                                        self.dropout_keep_prob)
            print("Second cell {0}->{1}".format(embedding_size, hidden_size))
            second_cell = TextLSTM._cell(embedding_size, hidden_size,
                                         lstm_use_peepholes,
                                         self.dropout_keep_prob)
            print("Third cell+ {0}->{1} (if applicable)".format(
                hidden_size, hidden_size))
            third_plus = TextLSTM._cell(hidden_size, hidden_size,
                                        lstm_use_peepholes,
                                        self.dropout_keep_prob)
            deep_cells = [third_plus] * (layer_count - 2)
            lstm_cells = rnn_cell.MultiRNNCell([first_cell, second_cell] +
                                               deep_cells)
        else:
            print(
                "Using simple 1-layer LSTM with hidden layer size {0}.".format(
                    hidden_size))
            lstm_cells = rnn_cell.LSTMCell(num_units=hidden_size,
                                           input_size=embedding_size,
                                           forget_bias=1.0,
                                           use_peepholes=lstm_use_peepholes)

        # Q: Can't batches end up containing both positive and negative labels?
        #    Can the LSTM batch training deal with this?
        #
        # A: Yes. Each batch feeds each sentence into the LSTM, incurs the loss,
        #    and backpropagates the error separately. Each example in a batch
        #    is independent. Note that, as opposed to language models, for
        #    instance, where we incur a loss at every output, in this case we
        #    only care about the final output of the RNN, since it doesn't make
        #    sense to classify incomplete tweets.

        outputs, _states = rnn(lstm_cells,
                               inputs=embedded_words,
                               dtype=tf.float32)

        # Layer 3: Final Softmax
        out_weight = tf.Variable(tf.random_normal([hidden_size, n_classes]))
        out_bias = tf.Variable(tf.random_normal([n_classes]))

        with tf.name_scope("output"):
            lstm_final_output = outputs[-1]
            self.scores = tf.nn.xw_plus_b(lstm_final_output,
                                          out_weight,
                                          out_bias,
                                          name="scores")
            self.predictions = tf.nn.softmax(self.scores, name="predictions")

        with tf.name_scope("loss"):
            self.losses = tf.nn.softmax_cross_entropy_with_logits(
                self.scores, self.input_y)
            self.loss = tf.reduce_mean(self.losses, name="loss")

        with tf.name_scope("accuracy"):
            self.correct_pred = tf.equal(tf.argmax(self.predictions, 1),
                                         tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, "float"),
                                           name="accuracy")
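
The transpose/reshape/split funneling commented in the example can be verified numerically. A small NumPy check with toy shapes (all names illustrative): each chunk produced by the split is exactly the batch of embeddings for one time step.

    import numpy as np

    batch, n_words, emb = 2, 3, 4
    x = np.arange(batch * n_words * emb).reshape(batch, n_words, emb)
    t = x.transpose(1, 0, 2).reshape(-1, emb)      # time-major, then flatten: (n_words*batch, emb)
    steps = np.split(t, n_words, axis=0)           # a Python list, like tf.split
    assert all(np.array_equal(s, x[:, i, :]) for i, s in enumerate(steps))
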
Example 6
    def __init__(self,
                 sess,
                 vocab_size,
                 cell_size,
                 embedding_size,
                 num_layer,
                 memory_size,
                 log_dir,
                 learning_rate=0.001,
                 momentum=0.9,
                 use_dropout=True,
                 l2_coef=1e-6):

        with tf.name_scope("io"):
            self.inputs = tf.placeholder(dtype=tf.int32,
                                         shape=(None, None),
                                         name="prev_words")
            self.input_lens = tf.placeholder(dtype=tf.int32,
                                             shape=(None, ),
                                             name="sent_len")
            self.labels = tf.placeholder(dtype=tf.int32,
                                         shape=(None, ),
                                         name="next_word")
            self.keep_prob = tf.placeholder(dtype=tf.float32, name="keep_prob")

        with variable_scope.variable_scope("word-embedding"):
            max_sent_len = array_ops.shape(self.inputs)[1]
            embedding = tf_helpers.weight_and_bias(vocab_size,
                                                   embedding_size,
                                                   "embedding_w",
                                                   include_bias=False)
            input_embedding = embedding_ops.embedding_lookup(
                embedding,
                tf.squeeze(tf.reshape(self.inputs, [-1, 1]), squeeze_dims=[1]))

            input_embedding = tf.reshape(input_embedding,
                                         [-1, max_sent_len, embedding_size])

        with variable_scope.variable_scope("rnn"):
            # cell = rnn_cell.LSTMCell(cell_size, use_peepholes=True, state_is_tuple=True)
            cell = tf_helpers.MemoryLSTMCell(cell_size,
                                             memory_size,
                                             use_peepholes=True)

            if use_dropout:
                cell = rnn_cell.DropoutWrapper(cell,
                                               output_keep_prob=self.keep_prob)

            if num_layer > 1:
                cell = rnn_cell.MultiRNNCell([cell] * num_layer,
                                             state_is_tuple=True)

            # with sequence_length set, the returned state is each sequence's true last state
            outputs, _ = tf.nn.dynamic_rnn(
                cell,
                input_embedding,
                dtype=tf.float32,
                sequence_length=self.input_lens,
            )
            # get the TRUE last outputs
            last_outputs = tf.reduce_sum(
                tf.mul(
                    outputs,
                    tf.expand_dims(
                        tf.one_hot(self.input_lens - 1, max_sent_len), -1)), 1)
            proj_w, proj_b = tf_helpers.weight_and_bias(cell_size,
                                                        vocab_size,
                                                        "output_project",
                                                        include_bias=True)
            self.logits = tf.matmul(last_outputs, proj_w) + proj_b

        self.loss = tf.reduce_mean(
            nn_ops.sparse_softmax_cross_entropy_with_logits(
                self.logits, self.labels))
        tf.scalar_summary("entropy_loss", self.loss)
        tf.scalar_summary("perplexity", tf.exp(self.loss))
        self.summary_op = tf.merge_all_summaries()

        # weight decay: L2 penalty on every non-bias trainable variable
        tvars = tf.trainable_variables()
        all_weights = []
        for v in tvars:
            if "bias" not in v.name.lower():
                all_weights.append(tf.nn.l2_loss(v))
                print("adding l2 to %s" % v.name)
        loss_l2 = tf.add_n(all_weights)
        self.reg_loss = self.loss + l2_coef * loss_l2

        # optimization
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.train_ops = optimizer.minimize(self.reg_loss)

        train_log_dir = os.path.join(log_dir, "train")
        valid_log_dir = os.path.join(log_dir, "valid")
        print "Save summary to %s" % log_dir
        self.train_summary_writer = tf.train.SummaryWriter(
            train_log_dir, sess.graph)
        self.valid_summary_writer = tf.train.SummaryWriter(
            valid_log_dir, sess.graph)
        self.saver = tf.train.Saver(tf.all_variables())
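
The "TRUE last outputs" selection above masks the time axis with a one-hot vector at position `input_lens - 1` and sums it away. A NumPy sketch of the same trick with toy values (illustrative names only):

    import numpy as np

    batch, max_len, size = 2, 4, 3
    outputs = np.random.rand(batch, max_len, size)
    lens = np.array([2, 4])                        # true length of each sequence
    mask = np.eye(max_len)[lens - 1]               # one-hot at the last valid step
    last = (outputs * mask[:, :, None]).sum(axis=1)
    assert np.allclose(last[0], outputs[0, 1]) and np.allclose(last[1], outputs[1, 3])
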
Example 7
    def __init__(self, sess, config, data_feed, log_dir):

        vocab_size = len(data_feed.vocab)
        self.data_feed = data_feed

        with tf.name_scope("io"):
            self.inputs = tf.placeholder(dtype=tf.int32,
                                         shape=(None, None),
                                         name="input_seq")
            self.input_lens = tf.placeholder(dtype=tf.int32,
                                             shape=(None, ),
                                             name="seq_len")
            self.da_labels = tf.placeholder(dtype=tf.int32,
                                            shape=(None, ),
                                            name="dialog_acts")
            self.senti_labels = tf.placeholder(
                dtype=tf.float32,
                shape=(None, data_feed.feature_size[data_feed.SENTI_ID]),
                name="sentiments")

            self.learning_rate = tf.Variable(float(config.init_lr),
                                             trainable=False)
            self.learning_rate_decay_op = self.learning_rate.assign(
                self.learning_rate * config.lr_decay)

        max_sent_len = array_ops.shape(self.inputs)[1]
        batch_size = array_ops.shape(self.inputs)[0]

        with variable_scope.variable_scope("word-embedding"):
            embedding = tf.get_variable("embedding",
                                        [vocab_size, config.embed_size],
                                        dtype=tf.float32)
            input_embedding = embedding_ops.embedding_lookup(
                embedding,
                tf.squeeze(tf.reshape(self.inputs, [-1, 1]), squeeze_dims=[1]))

            input_embedding = tf.reshape(input_embedding,
                                         [-1, max_sent_len, config.embed_size])

        with variable_scope.variable_scope("rnn"):
            if config.cell_type == "gru":
                cell = rnn_cell.GRUCell(config.cell_size)
            elif config.cell_type == "lstm":
                cell = rnn_cell.LSTMCell(config.cell_size,
                                         use_peepholes=False,
                                         forget_bias=1.0)
            elif config.cell_type == "rnn":
                cell = rnn_cell.BasicRNNCell(config.cell_size)
            else:
                raise ValueError("unknown RNN type")

            if config.keep_prob < 1.0:
                cell = rnn_cell.DropoutWrapper(
                    cell,
                    output_keep_prob=config.keep_prob,
                    input_keep_prob=config.keep_prob)

            if config.num_layer > 1:
                cell = rnn_cell.MultiRNNCell([cell] * config.num_layer,
                                             state_is_tuple=True)

            # with sequence_length set, the returned state is each sequence's true last state
            outputs, _ = tf.nn.dynamic_rnn(
                cell,
                input_embedding,
                dtype=tf.float32,
                sequence_length=self.input_lens,
            )
            # get the TRUE last outputs
            last_outputs = tf.reduce_sum(
                tf.mul(
                    outputs,
                    tf.expand_dims(
                        tf.one_hot(self.input_lens - 1, max_sent_len), -1)), 1)

            self.dialog_acts = self.fnn(
                last_outputs, data_feed.feature_size[data_feed.DA_ID], [100],
                "dialog_act_fnn")
            self.sentiments = self.fnn(
                last_outputs, data_feed.feature_size[data_feed.SENTI_ID],
                [100], "setiment_fnn")

        self.loss = (tf.reduce_sum(nn_ops.sparse_softmax_cross_entropy_with_logits(
                         self.dialog_acts, self.da_labels)) +
                     tf.reduce_sum(nn_ops.softmax_cross_entropy_with_logits(
                         self.sentiments, self.senti_labels)))
        self.loss /= tf.to_float(batch_size)

        tf.scalar_summary("entropy_loss", self.loss)
        self.summary_op = tf.merge_all_summaries()

        # no weight decay applied; just list the trainable variables
        tvars = tf.trainable_variables()
        for v in tvars:
            print("Trainable %s" % v.name)
        # optimization
        if config.op == "adam":
            print("Use Adam")
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
        elif config.op == "rmsprop":
            print("Use RMSProp")
            optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        else:
            print("Use SGD")
            optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)

        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars),
                                          config.grad_clip)
        self.train_ops = optimizer.apply_gradients(zip(grads, tvars))
        self.saver = tf.train.Saver(tf.all_variables(),
                                    write_version=tf.train.SaverDef.V2)

        if log_dir is not None:
            train_log_dir = os.path.join(log_dir, "train")
            print("Save summary to %s" % log_dir)
            self.train_summary_writer = tf.train.SummaryWriter(
                train_log_dir, sess.graph)
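
A hedged inference sketch for this classifier: the wrapper name `TagModel` and the padded batch variables are assumptions; dialog-act predictions come from the argmax over the `dialog_acts` logits (this model has no dropout placeholder, so nothing else needs feeding).

    model = TagModel(sess, config, data_feed, log_dir)   # hypothetical class around the __init__ above
    feed = {model.inputs: x_padded,                      # [batch, max_len] int word ids
            model.input_lens: lens}                      # true lengths, used by dynamic_rnn
    da_logits, senti_logits = sess.run([model.dialog_acts, model.sentiments], feed)
    da_pred = da_logits.argmax(axis=1)
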