def __init__(self, is_training, length):
    self.batch_size = batch_size = FLAGS.batch_size
    self.num_steps = num_steps = length
    hidden_size = FLAGS.hidden_dim

    self._input_data = tf.placeholder(tf.float32,
                                      [batch_size, None, FLAGS.input_dim])
    self._targets = tf.placeholder(tf.float32,
                                   [batch_size, None, FLAGS.output_dim])

    if FLAGS.model == "rnn":
        vanilla_rnn_cell = rnn_cell.BasicRNNCell(num_units=FLAGS.hidden_dim)
        if is_training and FLAGS.keep_prob < 1:
            vanilla_rnn_cell = rnn_cell.DropoutWrapper(
                vanilla_rnn_cell, output_keep_prob=FLAGS.keep_prob)
        if FLAGS.layer == 1:
            cell = vanilla_rnn_cell
        elif FLAGS.layer == 2:
            cell = rnn_cell.MultiRNNCell([vanilla_rnn_cell] * 2)
    elif FLAGS.model == "lstm":
        lstm_cell = rnn_cell.BasicLSTMCell(num_units=FLAGS.hidden_dim,
                                           forget_bias=1.0)
        if is_training and FLAGS.keep_prob < 1:
            lstm_cell = rnn_cell.DropoutWrapper(
                lstm_cell, output_keep_prob=FLAGS.keep_prob)
        if FLAGS.layer == 1:
            cell = lstm_cell
        elif FLAGS.layer == 2:
            cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
    elif FLAGS.model == "gru":
        gru_cell = rnn_cell.GRUCell(num_units=FLAGS.hidden_dim)
        if is_training and FLAGS.keep_prob < 1:
            gru_cell = rnn_cell.DropoutWrapper(
                gru_cell, output_keep_prob=FLAGS.keep_prob)
        cell = gru_cell
    else:
        raise ValueError("Invalid model: %s" % FLAGS.model)

    self._initial_state = cell.zero_state(batch_size, tf.float32)

    outputs = []
    state = self._initial_state
    with tf.variable_scope("RNN"):
        for time_step in range(num_steps):
            if time_step > 0:
                tf.get_variable_scope().reuse_variables()
            (cell_output, state) = cell(self._input_data[:, time_step, :], state)
            outputs.append(cell_output)
    self._final_state = state

    hidden_output = tf.reshape(tf.concat(1, outputs), [-1, hidden_size])
    V_1 = tf.get_variable(
        "v_1", shape=[hidden_size, FLAGS.output_dim],
        initializer=tf.random_uniform_initializer(-tf.sqrt(1. / hidden_size),
                                                  tf.sqrt(1. / hidden_size)))
    b_1 = tf.get_variable("b_1", shape=[FLAGS.output_dim],
                          initializer=tf.constant_initializer(0.1))
    logits = tf.add(tf.matmul(hidden_output, V_1), b_1)

    target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
    training_loss = tf.reduce_sum(tf.pow(logits - target, 2)) / 2
    mse = tf.reduce_mean(tf.pow(logits - target, 2))
    self._cost = mse

    if not is_training:
        return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars),
                                      FLAGS.max_grad_norm)
    optimizer = tf.train.GradientDescentOptimizer(self.lr)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
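# Usage sketch (illustrative, not from the original source). It assumes the
# enclosing class exposes read-only properties (input_data, targets, lr, cost,
# train_op) for the underscore-prefixed attributes above; those property names
# are assumptions, as is the run_epoch_sketch helper itself.
import numpy as np
import tensorflow as tf

def run_epoch_sketch(sess, model, batches, learning_rate=0.1):
    """Feed (inputs, targets) batches through the graph once; return mean MSE."""
    sess.run(tf.assign(model.lr, learning_rate))  # model.lr assumed to exist
    costs = []
    for inputs, targets in batches:
        # inputs:  [batch_size, num_steps, input_dim]
        # targets: [batch_size, num_steps, output_dim]
        mse, _ = sess.run([model.cost, model.train_op],
                          feed_dict={model.input_data: inputs,
                                     model.targets: targets})
        costs.append(mse)
    return float(np.mean(costs))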
def __init__(self, is_training, config):
    self.batch_size = batch_size = config.batch_size
    self.num_steps = num_steps = config.num_steps
    size = config.hidden_size
    vocab_size = config.vocab_size

    self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
    self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])

    # Slightly better results can be obtained with forget gate biases
    # initialized to 1 but the hyperparameters of the model would need to be
    # different than reported in the paper.
    lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
    if is_training and config.keep_prob < 1:
        lstm_cell = rnn_cell.DropoutWrapper(
            lstm_cell, output_keep_prob=config.keep_prob)
    cell = rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)

    self._initial_state = cell.zero_state(batch_size, tf.float32)

    with tf.device("/cpu:0"):
        embedding = tf.get_variable("embedding", [vocab_size, size])
        inputs = tf.nn.embedding_lookup(embedding, self._input_data)

    if is_training and config.keep_prob < 1:
        inputs = tf.nn.dropout(inputs, config.keep_prob)

    # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
    # This builds an unrolled LSTM for tutorial purposes only.
    # In general, use the rnn() or state_saving_rnn() from rnn.py.
    #
    # The alternative version of the code below is:
    #
    # from tensorflow.models.rnn import rnn
    # inputs = [tf.squeeze(input_, [1])
    #           for input_ in tf.split(1, num_steps, inputs)]
    # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
    outputs = []
    state = self._initial_state
    with tf.variable_scope("RNN"):
        for time_step in range(num_steps):
            if time_step > 0:
                tf.get_variable_scope().reuse_variables()
            (cell_output, state) = cell(inputs[:, time_step, :], state)
            outputs.append(cell_output)

    output = tf.reshape(tf.concat(1, outputs), [-1, size])
    softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
    softmax_b = tf.get_variable("softmax_b", [vocab_size])
    logits = tf.matmul(output, softmax_w) + softmax_b
    loss = seq2seq.sequence_loss_by_example(
        [logits],
        [tf.reshape(self._targets, [-1])],
        [tf.ones([batch_size * num_steps])])
    self._cost = cost = tf.reduce_sum(loss) / batch_size
    self._final_state = state

    if not is_training:
        return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                      config.max_grad_norm)
    optimizer = tf.train.GradientDescentOptimizer(self.lr)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
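# Follow-up sketch (illustrative, not from the source). Because the cost above
# is sum(loss) / batch_size accumulated over num_steps unrolled steps, per-word
# perplexity is conventionally reported as exp(total cost / total steps); the
# equal-sized-batches assumption below is mine.
import numpy as np

def perplexity_from_costs_sketch(batch_costs, num_steps):
    # batch_costs: per-batch cost values returned by running the cost tensor.
    return float(np.exp(np.sum(batch_costs) / (len(batch_costs) * num_steps)))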
def __init__(self, source_vocab_size, target_vocab_size, buckets, hidden_edim,
             hidden_units, num_layers, keep_prob, max_gradient_norm, batch_size,
             learning_rate, learning_rate_decay_factor, beam_size,
             use_lstm=False, forward_only=False):
    """Create the model.

    Args:
      source_vocab_size: size of the source vocabulary.
      target_vocab_size: size of the target vocabulary.
      buckets: a list of pairs (I, O), where I specifies maximum input length
        that will be processed in that bucket, and O specifies maximum output
        length. Training instances that have inputs longer than I or outputs
        longer than O will be pushed to the next bucket and padded accordingly.
        We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
      hidden_edim: number of dimensions for word embedding.
      hidden_units: number of hidden units for each layer.
      num_layers: number of layers in the model.
      keep_prob: keep probability used for dropout.
      max_gradient_norm: gradients will be clipped to maximally this norm.
      batch_size: the size of the batches used during training; the model
        construction is independent of batch_size, so it can be changed after
        initialization if this is convenient, e.g., for decoding.
      learning_rate: learning rate to start with.
      learning_rate_decay_factor: decay learning rate by this much when needed.
      beam_size: the beam size used in beam search.
      use_lstm: if true, we use LSTM cells instead of GRU cells.
      forward_only: if set, we do not construct the backward pass in the model.
    """
    self.source_vocab_size = source_vocab_size
    self.target_vocab_size = target_vocab_size
    self.buckets = buckets
    self.batch_size = batch_size
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)

    def loss_function(logit, target, output_projection):
        logit = math_ops.matmul(logit, output_projection, transpose_b=True)
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
        return crossent

    softmax_loss_function = loss_function

    # Create the internal multi-layer cell for our RNN.
    single_cell = rnn_cell.GRUCell(hidden_units)
    if use_lstm:
        single_cell = rnn_cell.BasicLSTMCell(hidden_units)  # added by yfeng
    cell = single_cell
    if num_layers > 1:
        cell = rnn_cell.MultiRNNCell([single_cell] * num_layers)
    if not forward_only:
        cell = rnn_cell.DropoutWrapper(cell, input_keep_prob=keep_prob,
                                       seed=SEED)

    # The seq2seq function: we use embedding for the input and attention.
    def seq2seq_f(encoder_inputs, encoder_mask, encoder_probs, encoder_ids,
                  encoder_hs, mem_mask, decoder_inputs, do_decode):
        return seq2seq_fy.embedding_attention_seq2seq(
            encoder_inputs, encoder_mask, encoder_probs, encoder_ids,
            encoder_hs, mem_mask, decoder_inputs, cell,
            num_encoder_symbols=source_vocab_size,
            num_decoder_symbols=target_vocab_size,
            embedding_size=hidden_edim,
            beam_size=beam_size,
            num_layers=num_layers,
            feed_previous=do_decode)

    # Feeds for inputs.
    self.encoder_inputs = []
    self.decoder_inputs = []
    self.target_weights = []
    self.decoder_aligns = []
    self.decoder_align_weights = []
    for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
        self.encoder_inputs.append(
            tf.placeholder(tf.int32, shape=[None],
                           name="encoder{0}".format(i)))
    for i in xrange(buckets[-1][1] + 1):
        self.decoder_inputs.append(
            tf.placeholder(tf.int32, shape=[None],
                           name="decoder{0}".format(i)))
        self.target_weights.append(
            tf.placeholder(tf.float32, shape=[None],
                           name="weight{0}".format(i)))
        self.decoder_aligns.append(
            tf.placeholder(tf.float32, shape=[None, None],
                           name="align{0}".format(i)))
        self.decoder_align_weights.append(
            tf.placeholder(tf.float32, shape=[None],
                           name="align_weight{0}".format(i)))

    self.encoder_mask = tf.placeholder(tf.int32, shape=[None, None],
                                       name="encoder_mask")
    self.encoder_probs = tf.placeholder(
        tf.float32, shape=[None, None, self.target_vocab_size],
        name="encoder_prob")
    self.encoder_ids = tf.placeholder(tf.int32, shape=[None, None],
                                      name="encoder_id")
    self.encoder_hs = tf.placeholder(tf.float32, shape=[None, None, None],
                                     name="encoder_h")
    self.mem_mask = tf.placeholder(tf.float32, shape=[None, None],
                                   name="mem_mask")

    # Our targets are decoder inputs shifted by one.
    targets = [
        self.decoder_inputs[i + 1]
        for i in xrange(len(self.decoder_inputs) - 1)
    ]

    # Training outputs and losses.
    if forward_only:
        self.outputs, self.losses, self.symbols = seq2seq_fy.model_with_buckets(
            self.encoder_inputs, self.encoder_mask, self.encoder_probs,
            self.encoder_ids, self.encoder_hs, self.mem_mask,
            self.decoder_inputs, targets, self.target_weights,
            self.decoder_aligns, self.decoder_align_weights, buckets,
            lambda x, y, z, s, a, b, c: seq2seq_f(x, y, z, s, a, b, c, True),
            softmax_loss_function=softmax_loss_function)
    else:
        self.outputs, self.losses, self.symbols = seq2seq_fy.model_with_buckets(
            self.encoder_inputs, self.encoder_mask, self.encoder_probs,
            self.encoder_ids, self.encoder_hs, self.mem_mask,
            self.decoder_inputs, targets, self.target_weights,
            self.decoder_aligns, self.decoder_align_weights, buckets,
            lambda x, y, z, s, a, b, c: seq2seq_f(x, y, z, s, a, b, c, False),
            softmax_loss_function=softmax_loss_function)

    # only update memory attention parameters
    params_to_update = [
        p for p in tf.trainable_variables()
        if p.name in [
            u'beta1_power:0',
            u'beta2_power:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias/Adam_1:0'
        ]
    ]

    if not forward_only:
        self.gradient_norms = []
        self.gradient_norms_print = []
        self.updates = []
        opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        for b in xrange(len(buckets)):
            gradients = tf.gradients(
                self.losses[b], params_to_update,
                aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
            clipped_gradients, norm = tf.clip_by_global_norm(
                gradients, max_gradient_norm)
            self.gradient_norms.append(norm)
            self.updates.append(
                opt.apply_gradients(zip(clipped_gradients, params_to_update),
                                    global_step=self.global_step))

    # load trained NMT parameters
    params_to_load = [
        p for p in tf.all_variables()
        if p.name not in [
            u'beta1_power:0',
            u'beta2_power:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias/Adam_1:0'
        ]
    ]

    # only save memory attention parameters
    params_to_save = [
        p for p in tf.all_variables()
        if p.name in [
            u'Variable:0',
            u'Variable_1:0',
            u'beta1_power:0',
            u'beta2_power:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnVt_0/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnWt_0/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Matrix/Adam_1:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias/Adam:0',
            u'embedding_attention_seq2seq/embedding_attention_decoder/attention_decoder/attention/AttnU_0/Linear_mem/Bias/Adam_1:0',
        ]
    ]

    self.saver_old = tf.train.Saver(params_to_load, max_to_keep=1000,
                                    keep_checkpoint_every_n_hours=6)
    self.saver = tf.train.Saver(params_to_save, max_to_keep=1000,
                                keep_checkpoint_every_n_hours=6)
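# Checkpoint-handling sketch (illustrative, not from the source): saver_old
# restores the frozen, pre-trained NMT parameters (params_to_load), while saver
# checkpoints only the memory-attention parameters (params_to_save) that the
# updates above actually train. The checkpoint paths below are hypothetical.
def restore_and_checkpoint_sketch(sess, model, nmt_ckpt_path, mem_ckpt_dir, step):
    model.saver_old.restore(sess, nmt_ckpt_path)   # load pre-trained NMT weights
    # ... run training steps; only params_to_update receive gradients ...
    model.saver.save(sess, mem_ckpt_dir + "/memory_attn.ckpt", global_step=step)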
def __init__(self, config):
    self._config = config

    # Input placeholders
    self._input_seq = tf.placeholder(tf.int32, [None, config.seq_length],
                                     name='input_seq')
    self._target_seq = tf.placeholder(tf.int32, [None, config.seq_length],
                                      name='target_seq')

    embedding = tf.get_variable('embedding',
                                [config.vocab_size, config.hidden_size])
    inputs = tf.gather(embedding, self._input_seq)

    # Hidden layers: stacked LSTM cells with Dropout.
    with tf.variable_scope("RNN"):
        if config.cell_type == 'lstm':
            cell = rnn_cell.BasicLSTMCell(config.is_training, config.hidden_size)
        elif config.cell_type == 'bnlstm':
            cell = rnn_cell.BNLSTMCell(config.is_training, config.hidden_size)
        elif config.cell_type == 'gru':
            cell = rnn_cell.GRUCell(config.is_training, config.hidden_size)
        elif config.cell_type == 'bngru.full':
            cell = rnn_cell.BNGRUCell(config.is_training, config.hidden_size,
                                      full_bn=True)
        elif config.cell_type == 'bngru.simple':
            cell = rnn_cell.BNGRUCell(config.is_training, config.hidden_size,
                                      full_bn=False)
        else:
            raise ValueError('Unknown cell_type: %s' % config.cell_type)

        # Apply dropout if we're training.
        if config.is_training and config.keep_prob < 1.0:
            self._cell = cell = rnn_cell.DropoutWrapper(
                cell,
                input_keep_prob=config.keep_prob,
                output_keep_prob=config.keep_prob)

        # No implementation of MultiRNNCell in our own rnn_cell.py yet
        # self._multi_cell = multi_cell = (
        #     tf.nn.rnn_cell.MultiRNNCell([cell] * config.hidden_depth))
        self._cell = cell

    # Placeholder for initial hidden state.
    self._initial_state = tf.placeholder(tf.float32, [None, cell.state_size],
                                         name="initial_state")

    # Split inputs into individual timesteps for BPTT.
    split_input = [
        tf.squeeze(_input, squeeze_dims=[1])
        for _input in tf.split(1, config.seq_length, inputs)
    ]

    # Create the recurrent network.
    with tf.variable_scope("RNN"):
        state = self._initial_state
        outputs = []
        for time_step in range(config.seq_length):
            if time_step > config.pop_step:
                tf.get_variable_scope().reuse_variables()
                cell_output, state = cell(split_input[time_step], state,
                                          config.pop_step)
            else:
                cell_output, state = cell(split_input[time_step], state,
                                          time_step)
            outputs.append(cell_output)
    self._final_state = state

    # Reshape the output to [(batch_size * seq_length), hidden_size]
    outputs = tf.reshape(tf.concat(1, outputs), [-1, config.hidden_size])

    # Softmax
    softmax_w = tf.get_variable(
        'softmax_w', [config.vocab_size, config.hidden_size],
        # initializer=orthogonal_initializer)
        initializer=None)
    softmax_b = tf.get_variable('softmax_b', [config.vocab_size])
    self._logits = tf.matmul(outputs, tf.transpose(softmax_w)) + softmax_b
    self._probs = tf.nn.softmax(self._logits)

    # Average cross-entropy loss within the batch.
    loss_tensor = tf.nn.sparse_softmax_cross_entropy_with_logits(
        self._logits, tf.reshape(self._target_seq, [-1]))
    self._loss = tf.reduce_sum(loss_tensor) / config.batch_size
    self._perplexity = tf.exp(self._loss / config.seq_length)

    # Optimizer
    if config.is_training:  # shouldn't need this if, but just in case
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars),
                                          config.max_grad_norm)
        if config.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(config.learning_rate)
        elif config.optimizer == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(config.learning_rate)
        elif config.optimizer == 'adagrad':
            optimizer = tf.train.AdagradOptimizer(config.learning_rate)
        else:
            raise ValueError('Invalid optimizer: %s' % config.optimizer)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    if not config.is_training:
        self.merged_summaries = tf.merge_all_summaries()
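# Usage sketch (illustrative, not from the source). Because _initial_state here
# is a placeholder rather than a cell.zero_state() tensor, the caller must feed
# an explicit numpy state and thread final_state back in between batches. The
# property names (input_seq, target_seq, initial_state, final_state, loss,
# train_op) and a training-mode config are assumptions.
import numpy as np

def run_training_batch_sketch(sess, model, state_size, x, y, prev_state=None):
    if prev_state is None:
        prev_state = np.zeros((x.shape[0], state_size), dtype=np.float32)
    loss, state, _ = sess.run(
        [model.loss, model.final_state, model.train_op],
        feed_dict={model.input_seq: x,
                   model.target_seq: y,
                   model.initial_state: prev_state})
    return loss, state  # pass `state` back in to continue a longer sequence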
def __init__(self,
             source_vocab_size_1,
             source_vocab_size_2,
             target_vocab_size,
             buckets,
             # size,  # annotated by yfeng
             hidden_edim, hidden_units,  # added by yfeng
             num_layers,
             max_gradient_norm,
             batch_size,
             learning_rate,
             learning_rate_decay_factor,
             beam_size,  # added by shiyue
             constant_emb_en,  # added by al
             constant_emb_fr,  # added by al
             use_lstm=False,
             num_samples=10240,
             forward_only=False):
    """Create the model.

    Args:
      source_vocab_size: size of the source vocabulary.
      target_vocab_size: size of the target vocabulary.
      buckets: a list of pairs (I, O), where I specifies maximum input length
        that will be processed in that bucket, and O specifies maximum output
        length. Training instances that have inputs longer than I or outputs
        longer than O will be pushed to the next bucket and padded accordingly.
        We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
      # size: number of units in each layer of the model.  # annotated by yfeng
      hidden_edim: number of dimensions for word embedding.
      hidden_units: number of hidden units for each layer.
      num_layers: number of layers in the model.
      max_gradient_norm: gradients will be clipped to maximally this norm.
      batch_size: the size of the batches used during training; the model
        construction is independent of batch_size, so it can be changed after
        initialization if this is convenient, e.g., for decoding.
      learning_rate: learning rate to start with.
      learning_rate_decay_factor: decay learning rate by this much when needed.
      use_lstm: if true, we use LSTM cells instead of GRU cells.
      num_samples: number of samples for sampled softmax.
      forward_only: if set, we do not construct the backward pass in the model.
    """
    self.source_vocab_size_1 = source_vocab_size_1
    self.source_vocab_size_2 = source_vocab_size_2
    self.target_vocab_size = target_vocab_size
    self.buckets = buckets
    self.batch_size = batch_size
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)

    # If we use sampled softmax, we need an output projection.
    output_projection = None
    softmax_loss_function = None
    # Sampled softmax only makes sense if we sample less than vocabulary size.
    # if num_samples > 0 and num_samples < self.target_vocab_size:
    if num_samples > 0:
        # w = tf.get_variable("proj_w", [size, self.target_vocab_size])  # annotated by yfeng
        w = tf.get_variable("proj_w",
                            [hidden_units // 2, self.target_vocab_size],
                            initializer=tf.random_normal_initializer(
                                0, 0.01, seed=SEED))  # added by yfeng
        # w_t = tf.transpose(w)
        b = tf.get_variable("proj_b", [self.target_vocab_size],
                            initializer=tf.constant_initializer(0.0),
                            trainable=False)  # added by yfeng
        output_projection = (w, b)

        def sampled_loss(logit, target):
            # labels = tf.reshape(labels, [-1, 1])
            logit = nn_ops.xw_plus_b(logit, output_projection[0],
                                     output_projection[1])
            # return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
            #                                   self.target_vocab_size)
            target = array_ops.reshape(target, [-1])
            return nn_ops.sparse_softmax_cross_entropy_with_logits(
                logit, target)

        softmax_loss_function = sampled_loss

    # Create the internal multi-layer cell for our RNN.
    # single_cell = tf.nn.rnn_cell.GRUCell(hidden_units)  # annotated by yfeng
    single_cell = rnn_cell.GRUCell(hidden_units)  # added by yfeng
    if use_lstm:
        # single_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_units)  # annotated by yfeng
        single_cell = rnn_cell.BasicLSTMCell(hidden_units)  # added by yfeng
    cell = single_cell
    if num_layers > 1:
        # modified by yfeng
        # cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
        cell = rnn_cell.MultiRNNCell([single_cell] * num_layers)
        # end by yfeng
    cell = rnn_cell.DropoutWrapper(cell, input_keep_prob=0.8, seed=SEED)

    # The seq2seq function: we use embedding for the input and attention.
    def seq2seq_f(encoder_inputs_1, encoder_inputs_2, encoder_mask_1,
                  encoder_mask_2, decoder_inputs, do_decode):
        # return tf.nn.seq2seq.embedding_attention_seq2seq(  # annotated by yfeng
        return seq2seq_al.embedding_attention_seq2seq(  # added by yfeng
            encoder_inputs_1, encoder_inputs_2, encoder_mask_1, encoder_mask_2,
            decoder_inputs, cell,
            num_encoder_symbols_1=source_vocab_size_1,
            num_encoder_symbols_2=source_vocab_size_2,
            num_decoder_symbols=target_vocab_size,
            # embedding_size=size,  # annotated by yfeng
            embedding_size=hidden_edim,  # added by yfeng
            beam_size=beam_size,  # added by shiyue
            constant_emb_en=constant_emb_en,  # added by al
            constant_emb_fr=constant_emb_fr,  # added by al
            output_projection=output_projection,
            feed_previous=do_decode)

    # Feeds for inputs.
    self.encoder_inputs_1 = []
    self.encoder_inputs_2 = []
    self.decoder_inputs = []
    self.target_weights = []
    for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
        self.encoder_inputs_1.append(
            tf.placeholder(tf.int32, shape=[None],
                           name="encoder{0}_1".format(i)))
    for i in xrange(buckets[-1][1]):  # Last bucket is the biggest one.
        self.encoder_inputs_2.append(
            tf.placeholder(tf.int32, shape=[None],
                           name="encoder{0}_2".format(i)))
    for i in xrange(buckets[-1][2] + 1):
        self.decoder_inputs.append(
            tf.placeholder(tf.int32, shape=[None],
                           name="decoder{0}".format(i)))
        self.target_weights.append(
            tf.placeholder(tf.float32, shape=[None],
                           name="weight{0}".format(i)))

    self.encoder_mask_1 = tf.placeholder(tf.int32, shape=[None, None],
                                         name="encoder_mask_1")
    self.encoder_mask_2 = tf.placeholder(tf.int32, shape=[None, None],
                                         name="encoder_mask_2")

    # Our targets are decoder inputs shifted by one.
    targets = [
        self.decoder_inputs[i + 1]
        for i in xrange(len(self.decoder_inputs) - 1)
    ]

    # Training outputs and losses.
    if forward_only:
        # self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(  # annotated by yfeng
        self.outputs, self.losses, self.symbols = seq2seq_al.model_with_buckets(  # added by yfeng and shiyue
            self.encoder_inputs_1, self.encoder_inputs_2, self.encoder_mask_1,
            self.encoder_mask_2, self.decoder_inputs, targets,
            self.target_weights, buckets,
            lambda x1, x2, y1, y2, z: seq2seq_f(x1, x2, y1, y2, z, True),
            softmax_loss_function=softmax_loss_function)
        # If we use output projection, we need to project outputs for decoding.
        # annotated by shiyue, when using beam search, no need to do decoding projection
        # if output_projection is not None:
        #     for b in xrange(len(buckets)):
        #         self.outputs[b] = [
        #             tf.matmul(output, output_projection[0]) + output_projection[1]
        #             for output in self.outputs[b]
        #         ]
        # ended by shiyue
    else:
        # self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(  # annotated by yfeng
        self.outputs, self.losses, self.symbols = seq2seq_al.model_with_buckets(  # added by yfeng and shiyue
            self.encoder_inputs_1, self.encoder_inputs_2, self.encoder_mask_1,
            self.encoder_mask_2, self.decoder_inputs, targets,
            self.target_weights, buckets,
            lambda x1, x2, y1, y2, z: seq2seq_f(x1, x2, y1, y2, z, False),
            softmax_loss_function=softmax_loss_function)

    # Gradients and SGD update operation for training the model.
    params_to_update = tf.trainable_variables()
    if not forward_only:
        self.gradient_norms = []
        self.gradient_norms_print = []
        self.updates = []
        # opt = tf.train.AdadeltaOptimizer(learning_rate=self.learning_rate, rho=0.95, epsilon=1e-6)
        opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        # opt = tf.train.GradientDescentOptimizer(self.learning_rate)  # added by yfeng
        for b in xrange(len(buckets)):
            gradients = tf.gradients(
                self.losses[b], params_to_update,
                aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
            # gradients_print = tf.gradients(self.losses[b], params_to_print)
            clipped_gradients, norm = tf.clip_by_global_norm(
                gradients, max_gradient_norm)
            # _, norm_print = tf.clip_by_global_norm(gradients_print,
            #                                        max_gradient_norm)
            self.gradient_norms.append(norm)
            # self.gradient_norms_print.append(norm_print)
            self.updates.append(
                opt.apply_gradients(zip(clipped_gradients, params_to_update),
                                    global_step=self.global_step))

    # self.saver = tf.train.Saver(tf.all_variables())  # annotated by yfeng
    self.saver = tf.train.Saver(tf.all_variables(),
                                max_to_keep=1000,
                                keep_checkpoint_every_n_hours=6)  # added by yfeng
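# Learning-rate schedule sketch (illustrative, not from the source): the
# learning_rate_decay_op built above is typically run when held-out loss stops
# improving. The "worse than the recent window" heuristic below follows the
# standard TensorFlow translate example and is an assumption here.
def maybe_decay_learning_rate_sketch(sess, model, eval_losses, window=3):
    # eval_losses: history of held-out losses, most recent last.
    if len(eval_losses) > window and eval_losses[-1] > max(eval_losses[-window - 1:-1]):
        sess.run(model.learning_rate_decay_op)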
def __init__(self, source_vocab_size, target_vocab_size, buckets, hidden_edim,
             hidden_units, num_layers, keep_prob, max_gradient_norm, batch_size,
             learning_rate, learning_rate_decay_factor, beam_size,
             forward_only=False):
    """Create the model.

    Args:
      source_vocab_size: size of the source vocabulary.
      target_vocab_size: size of the target vocabulary.
      buckets: a list of pairs (I, O), where I specifies maximum input length
        that will be processed in that bucket, and O specifies maximum output
        length. Training instances that have inputs longer than I or outputs
        longer than O will be pushed to the next bucket and padded accordingly.
        We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
      hidden_edim: number of dimensions for word embedding.
      hidden_units: number of hidden units for each layer.
      num_layers: number of layers in the model.
      keep_prob: keep probability used for dropout.
      max_gradient_norm: gradients will be clipped to maximally this norm.
      batch_size: the size of the batches used during training; the model
        construction is independent of batch_size, so it can be changed after
        initialization if this is convenient, e.g., for decoding.
      learning_rate: learning rate to start with.
      learning_rate_decay_factor: decay learning rate by this much when needed.
      beam_size: the beam size for beam search decoding.
      forward_only: if set, we do not construct the backward pass in the model.
    """
    self.source_vocab_size = source_vocab_size
    self.target_vocab_size = target_vocab_size
    self.buckets = buckets
    self.batch_size = batch_size
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)

    w = tf.get_variable("proj_w", [hidden_units // 2, self.target_vocab_size],
                        initializer=tf.random_normal_initializer(0, 0.01,
                                                                 seed=123))
    b = tf.get_variable("proj_b", [self.target_vocab_size],
                        initializer=tf.constant_initializer(0.0),
                        trainable=False)
    output_projection = (w, b)  # before softmax, there is an output projection

    def softmax_loss_function(logit, target):
        # loss function of seq2seq model
        logit = nn_ops.xw_plus_b(logit, output_projection[0],
                                 output_projection[1])
        target = array_ops.reshape(target, [-1])
        return nn_ops.sparse_softmax_cross_entropy_with_logits(logit, target)

    single_cell = rnn_cell.GRUCell(hidden_units)
    cell = single_cell
    if num_layers > 1:
        cell = rnn_cell.MultiRNNCell([single_cell] * num_layers)
    if not forward_only:
        cell = rnn_cell.DropoutWrapper(cell,
                                       output_keep_prob=float(keep_prob),
                                       seed=123)

    # The seq2seq function: we use embedding for the input and attention.
    def seq2seq_f(encoder_inputs, encoder_mask, decoder_inputs, do_decode):
        return seq2seq.embedding_attention_seq2seq(
            encoder_inputs, encoder_mask, decoder_inputs, cell,
            num_encoder_symbols=source_vocab_size,
            num_decoder_symbols=target_vocab_size,
            embedding_size=hidden_edim,
            beam_size=beam_size,
            output_projection=output_projection,
            num_layers=num_layers,
            feed_previous=do_decode)

    # Feeds for inputs.
    self.encoder_inputs = []
    self.decoder_inputs = []
    self.target_weights = []
    for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
        self.encoder_inputs.append(
            tf.placeholder(tf.int32, shape=[None],
                           name="encoder{0}".format(i)))
    for i in xrange(buckets[-1][1] + 1):
        self.decoder_inputs.append(
            tf.placeholder(tf.int32, shape=[None],
                           name="decoder{0}".format(i)))
        self.target_weights.append(
            tf.placeholder(tf.float32, shape=[None],
                           name="weight{0}".format(i)))
    self.encoder_mask = tf.placeholder(tf.int32, shape=[None, None],
                                       name="encoder_mask")

    # Our targets are decoder inputs shifted by one.
    targets = [
        self.decoder_inputs[i + 1]
        for i in xrange(len(self.decoder_inputs) - 1)
    ]

    # Training outputs and losses.
    if forward_only:
        self.outputs, self.losses, self.symbols = seq2seq.model_with_buckets(
            self.encoder_inputs, self.encoder_mask, self.decoder_inputs,
            targets, self.target_weights, buckets,
            lambda x, y, z: seq2seq_f(x, y, z, True),
            softmax_loss_function=softmax_loss_function)
    else:
        self.outputs, self.losses, self.symbols = seq2seq.model_with_buckets(
            self.encoder_inputs, self.encoder_mask, self.decoder_inputs,
            targets, self.target_weights, buckets,
            lambda x, y, z: seq2seq_f(x, y, z, False),
            softmax_loss_function=softmax_loss_function)

    # Gradients and SGD update operation for training the model.
    params_to_update = tf.trainable_variables()
    if not forward_only:
        self.gradient_norms = []
        self.gradient_norms_print = []
        self.updates = []
        opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        for b in xrange(len(buckets)):
            gradients = tf.gradients(
                self.losses[b], params_to_update,
                aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
            clipped_gradients, norm = tf.clip_by_global_norm(
                gradients, max_gradient_norm)
            self.gradient_norms.append(norm)
            self.updates.append(
                opt.apply_gradients(zip(clipped_gradients, params_to_update),
                                    global_step=self.global_step))

    self.saver = tf.train.Saver(tf.all_variables(),
                                max_to_keep=1000,  # keep all checkpoints
                                keep_checkpoint_every_n_hours=6)
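# Feed-construction sketch (illustrative, not from the source). model_with_buckets
# unrolls one placeholder per position, so a step for a given bucket_id fills
# encoder_inputs[l], decoder_inputs[l] and target_weights[l] position by position.
# The list-of-lists batch layout (one array of shape [batch_size] per position)
# and the zero feed for the extra final decoder placeholder follow the standard
# TensorFlow translate example and are assumptions here; encoder_mask must be
# fed separately as well.
import numpy as np

def feed_for_bucket_sketch(model, bucket_id, encoder_batch, decoder_batch,
                           weight_batch, batch_size):
    encoder_size, decoder_size = model.buckets[bucket_id]
    feed = {}
    for l in range(encoder_size):
        feed[model.encoder_inputs[l].name] = encoder_batch[l]
    for l in range(decoder_size):
        feed[model.decoder_inputs[l].name] = decoder_batch[l]
        feed[model.target_weights[l].name] = weight_batch[l]
    # decoder_inputs holds decoder_size + 1 placeholders; the last one exists
    # only so that targets (inputs shifted by one) are defined at every step.
    feed[model.decoder_inputs[decoder_size].name] = np.zeros([batch_size],
                                                             dtype=np.int32)
    return feed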
def __init__(self, is_training, config):
    self.maxlen = maxlen = config.maxlen
    dim_proj = config.dim_proj
    n_words = config.n_words
    ydim = config.ydim  # number of classes

    self._input_data = tf.placeholder(tf.int32, [None, maxlen])
    self._mask_data = tf.placeholder(tf.float32, [None, maxlen])
    self._targets = tf.placeholder(tf.int64, [None])
    # make batch size flexible
    self._batch_size = batch_size = tf.placeholder(tf.int32, [])
    # length of sequences for current batch
    self._time_steps = tf.placeholder(tf.int32, [])

    # new lstm cell that has masking
    lstm_cell = rnn_cell.BasicLSTMCellMask(dim_proj)
    if is_training and config.keep_prob < 1:
        lstm_cell = rnn_cell.DropoutWrapper(
            lstm_cell, output_keep_prob=config.keep_prob)

    self._initial_state = lstm_cell.zero_state(batch_size, tf.float32)

    with tf.device("/cpu:0"):
        embedding = tf.get_variable("embedding", [n_words, dim_proj])
        inputs = tf.nn.embedding_lookup(embedding, self._input_data)

    # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
    # This builds an unrolled LSTM for tutorial purposes only. In general,
    # the rnn() function should be updated with mask option and be used.
    zero_output = array_ops.zeros(array_ops.pack([batch_size, dim_proj]),
                                  tf.float32)
    output = array_ops.zeros(array_ops.pack([batch_size, dim_proj]),
                             tf.float32)
    state = self._initial_state
    with tf.variable_scope("RNN"):
        for time_step in range(config.maxlen):
            if time_step > 0:
                tf.get_variable_scope().reuse_variables()
            call_cell = lambda: lstm_cell(inputs[:, time_step, :], state,
                                          self._mask_data[:, time_step])
            empty_update = lambda: (zero_output, state)
            (cell_output, state) = control_flow_ops.cond(
                time_step < self._time_steps, call_cell, empty_update)
            output = tf.add(output, cell_output)  # (adding 0 for short sentences)

    # find sum of the representations
    self._outputs_sum = output
    # find averaged feature of each sentence (shape: batch_size * dim_proj)
    sent_rep = tf.div(
        output,
        tf.expand_dims(tf.reduce_sum(self._mask_data, reduction_indices=1), 1))
    self._sent_rep = sent_rep

    softmax_w = tf.get_variable("softmax_w", [dim_proj, ydim])
    softmax_b = tf.get_variable("softmax_b", [ydim])
    self._logits = tf.matmul(self._sent_rep, softmax_w) + softmax_b
    self._cost = cost = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(self._logits,
                                                       self._targets))
    self._final_state = state

    if not is_training:
        return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads = tf.gradients(cost, tvars)
    self._grads = grads
    optimizer = tf.train.GradientDescentOptimizer(self.lr)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
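# Data-preparation sketch (illustrative, not from the source). The division by
# reduce_sum(_mask_data) above implies the mask is 1.0 for real tokens and 0.0
# for padding; the right-padding convention and pad_id=0 below are assumptions
# about the preprocessing. The _batch_size and _time_steps placeholders still
# need to be fed alongside these arrays.
import numpy as np

def pad_batch_sketch(token_id_seqs, maxlen, pad_id=0):
    batch = len(token_id_seqs)
    x = np.full((batch, maxlen), pad_id, dtype=np.int32)
    mask = np.zeros((batch, maxlen), dtype=np.float32)
    for i, seq in enumerate(token_id_seqs):
        seq = seq[:maxlen]
        x[i, :len(seq)] = seq
        mask[i, :len(seq)] = 1.0
    return x, mask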