def testLinear(self):
  with self.test_session() as sess:
    with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
      x = tf.zeros([1, 2])
      l = linear([x], 2, False)
      sess.run([tf.global_variables_initializer()])
      res = sess.run([l], {x.name: np.array([[1., 2.]])})
      self.assertAllClose(res[0], [[3.0, 3.0]])

      # Checks prevent you from accidentally creating a shared function.
      with self.assertRaises(ValueError):
        l1 = linear([x], 2, False)

      # But you can create a new one in a new scope and share the variables.
      with tf.variable_scope("l1") as new_scope:
        l1 = linear([x], 2, False)
      with tf.variable_scope(new_scope, reuse=True):
        linear([l1], 2, False)
      self.assertEqual(len(tf.trainable_variables()), 2)
def attention(query):
  """Put attention masks on hidden using hidden_features and query."""
  ds = []  # Results of attention reads will be stored here.
  for a in xrange(num_heads):
    with tf.variable_scope("Attention_%d" % a):
      y = linear(query, attention_vec_size, True)
      y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
      # Attention mask is a softmax of v^T * tanh(...).
      s = tf.reduce_sum(v[a] * tf.nn.tanh(hidden_features[a] + y), [2, 3])
      mask = tf.nn.softmax(s)  # Renamed from `a` to avoid shadowing the head index.
      # Now calculate the attention-weighted vector d.
      d = tf.reduce_sum(
          tf.reshape(mask, [-1, attn_length, 1, 1]) * hidden, [1, 2])
      ds.append(tf.reshape(d, [-1, attn_size]))
  return ds
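# Hedged sketch (not from the original code): the per-head mask above is
# s = softmax(v^T * tanh(W*h + U*q)). The NumPy version below uses made-up
# shapes to make the broadcast and reduction axes concrete; all names and
# sizes are illustrative stand-ins for the enclosing-scope variables.
def _attention_mask_demo():
  import numpy as np
  batch, attn_length, attn_size = 2, 5, 8           # illustrative sizes
  hidden = np.random.randn(batch, attn_length, 1, attn_size)
  hidden_features = np.random.randn(batch, attn_length, 1, attn_size)  # W*h
  y = np.random.randn(batch, 1, 1, attn_size)                          # U*q
  v = np.random.randn(attn_size)
  s = np.sum(v * np.tanh(hidden_features + y), axis=(2, 3))  # [batch, length]
  mask = np.exp(s) / np.exp(s).sum(axis=1, keepdims=True)    # softmax
  d = np.sum(mask.reshape(batch, attn_length, 1, 1) * hidden, axis=(1, 2))
  return d  # [batch, attn_size] -- one read vector per example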
def basic_rnn_cell(inputs, state, num_units, scope=None):
  if state is None:
    if inputs is not None:
      batch_size = inputs.get_shape()[0]
      dtype = inputs.dtype
    else:
      batch_size = 0
      dtype = tf.float32
    init_output = tf.zeros(tf.stack([batch_size, num_units]), dtype=dtype)
    init_state = tf.zeros(tf.stack([batch_size, num_units]), dtype=dtype)
    init_output.set_shape([batch_size, num_units])
    init_state.set_shape([batch_size, num_units])
    return init_output, init_state
  else:
    with tf.variable_scope(scope, "basic_rnn_cell", [inputs, state]):
      output = tf.tanh(linear([inputs, state], num_units, True))
    return output, output
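# Hedged usage sketch for basic_rnn_cell above; placeholder names and sizes
# are illustrative. A state=None call builds the zero state; passing an
# explicit scope lets the second step reuse the first step's weights.
def _basic_rnn_cell_demo():
  import tensorflow as tf
  x1 = tf.placeholder(tf.float32, [4, 3])
  x2 = tf.placeholder(tf.float32, [4, 3])
  _, state = basic_rnn_cell(x1, None, num_units=5)       # zero initial state
  out1, state = basic_rnn_cell(x1, state, 5, scope="step")
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    out2, state = basic_rnn_cell(x2, state, 5, scope="step")
  return out1, out2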
def batch_linear(args, output_size, bias):
  """Apply a linear map to a batch of matrices.

  args: a 3D Tensor or a list of 3D, batch x n x m, Tensors.
  """
  if not nest.is_sequence(args):
    args = [args]
  batch_size = args[0].get_shape().as_list()[0] or tf.shape(args[0])[0]
  flat_args = []
  for arg in args:
    m = arg.get_shape().as_list()[2]
    if not m:
      raise ValueError('batch_linear expects a known shape[2] for all '
                       'arguments, got: %s' % str(m))
    flat_args.append(tf.reshape(arg, [-1, m]))
  flat_output = linear(flat_args, output_size, bias)
  output = tf.reshape(flat_output, [batch_size, -1, output_size])
  return output
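# Hedged usage sketch for batch_linear above (names are illustrative): map a
# batch of n x m matrices to n x output_size by flattening to 2D, applying
# the shared `linear` weights once, and reshaping back.
def _batch_linear_demo():
  import tensorflow as tf
  with tf.variable_scope("batch_linear_demo"):
    x = tf.placeholder(tf.float32, [8, 10, 16])      # batch x n x m
    y = batch_linear(x, output_size=32, bias=True)   # -> [8, 10, 32]
  return y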
def _output_project(self, output, attn, project_size):
  with tf.variable_scope("AttnOutputProjection"):
    # `activation` is a module-level nonlinearity defined elsewhere.
    new_output = activation(linear([output, attn], project_size, False))
  return new_output
def beam_attention_decoder(decoder_inputs, initial_state, attention_states,
                           cell, output_size=None, num_heads=1,
                           loop_function=None, dtype=tf.float32, scope=None,
                           initial_state_attention=False,
                           output_projection=None, beam_size=10):
  """RNN decoder with attention for the sequence-to-sequence model.

  In this context "attention" means that, during decoding, the RNN can look up
  information in the additional tensor attention_states, and it does this by
  focusing on a few entries from the tensor. This model has proven to yield
  especially good results in a number of sequence-to-sequence tasks. This
  implementation is based on http://arxiv.org/abs/1412.7449 (see below for
  details). It is recommended for complex sequence-to-sequence tasks.

  Args:
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    initial_state: 2D Tensor [batch_size x cell.state_size].
    attention_states: 3D Tensor [batch_size x attn_length x attn_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    output_size: Size of the output vectors; if None, we use cell.output_size.
    num_heads: Number of attention heads that read from attention_states.
    loop_function: If not None, this function will be applied to the i-th
      output in order to generate the (i+1)-th input, and decoder_inputs will
      be ignored, except for the first element ("GO" symbol). This can be used
      for decoding, but also for training to emulate
      http://arxiv.org/abs/1506.03099. Signature --
      loop_function(prev, i, log_beam_probs, beam_path, beam_symbols) = next
        * prev is a 2D Tensor of shape [batch_size x output_size],
        * i is an integer, the step number (when advanced control is needed),
        * log_beam_probs, beam_path, beam_symbols are lists that the function
          appends to at each step,
        * next is a 2D Tensor of shape [batch_size x input_size].
    dtype: The dtype to use for the RNN initial state (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "attention_decoder".
    initial_state_attention: If False (default), initial attentions are zero.
      If True, initialize the attentions from the initial state and attention
      states -- useful when we wish to resume decoding from a previously
      stored decoder state and attention states.
    output_projection: A pair (W, B): output projection weights and biases
      applied to each cell output before the argmax.
    beam_size: Number of beams to track during decoding (default: 10).

  Returns:
    A tuple of the form (outputs, state, beam_path, beam_symbols), where:
      outputs: A list of the same length as decoder_inputs. Output i is
        computed from input i (which is either the i-th element of
        decoder_inputs or loop_function(output {i-1}, i)) as follows.
        First, we run the cell on a combination of the input and the previous
        attention masks:
          cell_output, new_state = cell(linear(input, prev_attn), prev_state).
        Then, we calculate new attention masks:
          new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
        and then we calculate the output:
          output = linear(cell_output, new_attn).
        In this beam variant, each entry of outputs holds the argmax symbol
        indices after applying output_projection, not the raw output vectors.
      state: The state of each decoder cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
      beam_path: Backpointers accumulated by loop_function, reshaped to
        [-1, beam_size].
      beam_symbols: Symbols chosen at each step, accumulated by
        loop_function, reshaped to [-1, beam_size].

  Raises:
    ValueError: when num_heads is not positive, there are no inputs, shapes
      of attention_states are not set, or input size cannot be inferred
      from the input.
  """
  if not decoder_inputs:
    raise ValueError("Must provide at least 1 input to attention decoder.")
  if num_heads < 1:
    raise ValueError("With fewer than 1 head, use a non-attention decoder.")
  # Both attn_length (shape[1]) and attn_size (shape[2]) are read below, so
  # check the full [1:3] slice, matching the error message.
  if not attention_states.get_shape()[1:3].is_fully_defined():
    raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
                     % attention_states.get_shape())
  if output_size is None:
    output_size = cell.output_size

  with tf.variable_scope(scope or "attention_decoder"):
    batch_size = tf.shape(decoder_inputs[0])[0]  # Needed for reshaping.
    attn_length = attention_states.get_shape()[1].value
    attn_size = attention_states.get_shape()[2].value

    # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
    hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_size])
    hidden_features = []
    v = []
    attention_vec_size = attn_size  # Size of query vectors for attention.
    for a in xrange(num_heads):
      k = tf.get_variable("AttnW_%d" % a,
                          [1, 1, attn_size, attention_vec_size])
      hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
      v.append(tf.get_variable("AttnV_%d" % a, [attention_vec_size]))

    state_size = int(initial_state.get_shape().with_rank(2)[1])
    state = tf.reshape(initial_state, [-1, state_size])

    def attention(query):
      """Put attention masks on hidden using hidden_features and query."""
      ds = []  # Results of attention reads will be stored here.
      for a in xrange(num_heads):
        with tf.variable_scope("Attention_%d" % a):
          y = linear(query, attention_vec_size, True)
          y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
          # Attention mask is a softmax of v^T * tanh(...).
          s = tf.reduce_sum(v[a] * tf.nn.tanh(hidden_features[a] + y), [2, 3])
          mask = tf.nn.softmax(s)
          # Now calculate the attention-weighted vector d.
          d = tf.reduce_sum(
              tf.reshape(mask, [-1, attn_length, 1, 1]) * hidden, [1, 2])
          ds.append(tf.reshape(d, [-1, attn_size]))
      return ds

    outputs = []
    prev = None
    batch_attn_size = tf.stack([batch_size, attn_size])
    attns = [tf.zeros(batch_attn_size, dtype=dtype)
             for _ in xrange(num_heads)]
    for a in attns:  # Ensure the second shape of attention vectors is set.
      a.set_shape([None, attn_size])
    if initial_state_attention:
      # Concatenate the per-head reads and flatten back to [-1, attn_size].
      attns = [tf.reshape(tf.concat(axis=0, values=attention(initial_state)),
                          [-1, attn_size])]

    log_beam_probs, beam_path, beam_symbols = [], [], []
    for i, inp in enumerate(decoder_inputs):
      if i > 0:
        tf.get_variable_scope().reuse_variables()
      # If loop_function is set, we use it instead of decoder_inputs.
      if loop_function is not None and prev is not None:
        with tf.variable_scope("loop_function", reuse=True):
          inp = loop_function(prev, i, log_beam_probs, beam_path, beam_symbols)
      input_size = inp.get_shape().with_rank(2)[1]
      x = linear([inp] + attns, input_size, True)
      cell_output, state = cell(x, state)

      # Run the attention mechanism.
      if i == 0 and initial_state_attention:
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
          attns = attention(state)
      else:
        attns = attention(state)

      with tf.variable_scope("AttnOutputProjection"):
        output = linear([cell_output] + attns, output_size, True)
      if loop_function is not None:
        prev = output
      if i == 0:
        # Tile the state beam_size times so each beam has its own copy.
        state = tf.reshape(
            tf.concat(axis=0, values=[state] * beam_size), [-1, state_size])
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
          attns = attention(state)

      outputs.append(tf.argmax(tf.nn.xw_plus_b(
          output, output_projection[0], output_projection[1]), axis=1))

    return (outputs, state,
            tf.reshape(tf.concat(axis=0, values=beam_path), [-1, beam_size]),
            tf.reshape(tf.concat(axis=0, values=beam_symbols),
                       [-1, beam_size]))
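# Hedged usage sketch for beam_attention_decoder above. The cell, projection
# variables, and placeholder shapes are illustrative stand-ins; a real beam
# search also needs a loop_function that appends to log_beam_probs,
# beam_path, and beam_symbols at each step.
def _beam_attention_decoder_demo():
  import tensorflow as tf
  num_steps, input_size, attn_len, attn_size, vocab = 15, 64, 20, 128, 10000
  cell = tf.nn.rnn_cell.GRUCell(128)
  decoder_inputs = [tf.placeholder(tf.float32, [None, input_size])
                    for _ in range(num_steps)]
  initial_state = tf.placeholder(tf.float32, [None, cell.state_size])
  attention_states = tf.placeholder(tf.float32, [None, attn_len, attn_size])
  w = tf.get_variable("proj_w", [cell.output_size, vocab])
  b = tf.get_variable("proj_b", [vocab])
  return beam_attention_decoder(
      decoder_inputs, initial_state, attention_states, cell,
      output_projection=(w, b), beam_size=10)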