def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
        unitary_hidden_state, secondary_cell_hidden_state = tf.split(1, 2, state)

        mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size * 2])
        mat_out = tf.get_variable('mat_out', [self.state_size * 2, self.output_size])

        in_proj = tf.matmul(inputs, mat_in)
        # tf.complex takes separate real and imaginary parts, so split the projection first
        in_proj_real, in_proj_imag = tf.split(1, 2, in_proj)
        in_proj_c = tf.complex(in_proj_real, in_proj_imag)

        out_state = modReLU(
            in_proj_c + ulinear(unitary_hidden_state, self.state_size),
            tf.get_variable('bias',
                            dtype=tf.float32,
                            shape=unitary_hidden_state.get_shape()[1:],  # get_variable needs a static shape
                            initializer=tf.constant_initializer(0.)),
            scope=scope)

        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear(
                [tf.real(out_state), tf.imag(out_state), inputs],
                self.output_size, True, 0.0)  # output width assumed; the original call omitted it

        with tf.variable_scope('scale_nonlinearity'):
            # hidden_bias was undefined in the original; created here so the block runs
            hidden_bias = tf.get_variable('hidden_bias', [self.output_size],
                                          initializer=tf.constant_initializer(0.))
            modulus = tf.complex_abs(unitary_linear_output_real)
            # rescale is computed but not yet folded into the output -- see the TODO above
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        # transition to data shortcut connection
        out_bias = tf.get_variable('out_bias', [self.output_size],
                                   initializer=tf.constant_initializer(0.))
        out_ = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]),
                         mat_out) + out_bias

        # hidden state is complex but output is completely real
        return out_, out_state  # complex
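
# The cell above calls a modReLU nonlinearity that is not defined in this snippet.
# Below is a minimal sketch of modReLU (the unitary-evolution RNN nonlinearity of
# Arjovsky et al.): it rescales a complex tensor by relu(|z| + b) / |z| while keeping
# its phase. The name modReLU_sketch and the epsilon default are assumptions; the
# repo's own helper may differ in signature and details.
import tensorflow as tf

def modReLU_sketch(z, bias, epsilon=1e-7, scope=None):
    """Rescale complex z by relu(|z| + bias) / |z|, preserving its phase."""
    with tf.variable_scope(scope or 'modReLU_sketch'):
        modulus = tf.complex_abs(z)  # |z|, a real-valued tensor
        rescale = tf.maximum(modulus + bias, 0.) / (modulus + epsilon)
        return tf.complex(rescale * tf.real(z), rescale * tf.imag(z))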
def __call__(self, inputs, state, timestep=0, scope=None):
    """Long short-term memory cell (LSTM) with multiplicative integration."""
    with tf.device("/gpu:" + str(self._gpu_for_layer)):
        with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            h, c = tf.split(1, 2, state)
            concat = multiplicative_integration([inputs, h], self._num_units * 4, 0.0)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = tf.split(1, 4, concat)

            if self.use_recurrent_dropout and self.is_training:
                input_contribution = tf.nn.dropout(tf.tanh(j), self.recurrent_dropout_factor)
            else:
                input_contribution = tf.tanh(j)

            new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * input_contribution
            new_h = tf.tanh(new_c) * tf.sigmoid(o)

            return new_h, tf.concat(1, [new_h, new_c])  # purposely reversed
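
# A small sketch of the state layout these LSTM cells use: the returned state is
# tf.concat(1, [new_h, new_c]), i.e. h first and c second, which is "purposely
# reversed" relative to TensorFlow's BasicLSTMCell (which packs (c, h)). The batch
# size and unit count below are arbitrary illustration values.
import tensorflow as tf

batch_size, num_units = 4, 8
h = tf.ones([batch_size, num_units])
c = tf.zeros([batch_size, num_units])
packed_state = tf.concat(1, [h, c])              # how these cells pack their state
h_again, c_again = tf.split(1, 2, packed_state)  # how __call__ unpacks it
initial_state = tf.zeros([batch_size, 2 * num_units])  # zero state for a fresh sequence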
def __call__(self, inputs, state, timestep=0, scope=None):
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
        # Parameters of gates are concatenated into one multiply for efficiency.
        hidden_state_plus_c_list = tf.split(1, self.num_memory_arrays + 1, state)

        h = hidden_state_plus_c_list[0]
        c_list = hidden_state_plus_c_list[1:]

        '''very large matrix multiplication to speed up procedure -- will split variables out later'''
        if self.use_multiplicative_integration:
            concat = multiplicative_integration(
                [inputs, h], self._num_units * 4 * self.num_memory_arrays, 0.0)
        else:
            concat = linear([inputs, h], self._num_units * 4 * self.num_memory_arrays, True)

        if self.use_layer_normalization:
            concat = layer_norm(concat, num_variables_in_tensor=4 * self.num_memory_arrays)

        # i = input_gate, j = new_input, f = forget_gate, o = output_gate -- comes in sets of four
        all_vars_list = tf.split(1, 4 * self.num_memory_arrays, concat)

        '''memory array loop'''
        new_c_list, new_h_list = [], []
        for array_counter in xrange(self.num_memory_arrays):

            i = all_vars_list[0 + array_counter * 4]
            j = all_vars_list[1 + array_counter * 4]
            f = all_vars_list[2 + array_counter * 4]
            o = all_vars_list[3 + array_counter * 4]

            if self.use_recurrent_dropout and self.is_training:
                input_contribution = tf.nn.dropout(tf.tanh(j), self.recurrent_dropout_factor)
            else:
                input_contribution = tf.tanh(j)

            new_c_list.append(c_list[array_counter] * tf.sigmoid(f + self._forget_bias) +
                              tf.sigmoid(i) * input_contribution)

            if self.use_layer_normalization:
                # give each memory array its own scope so the layer-norm variables don't collide
                new_c = layer_norm(new_c_list[-1],
                                   scope='layer_norm_memory_array_' + str(array_counter))
            else:
                new_c = new_c_list[-1]

            new_h_list.append(tf.tanh(new_c) * tf.sigmoid(o))

        '''sum all new_h components -- could instead do a mean -- but investigate that later'''
        new_h = tf.add_n(new_h_list)

        return new_h, tf.concat(1, [new_h] + new_c_list)  # purposely reversed
def layer_norm(input_tensor, num_variables_in_tensor=1, initial_bias_value=0.0, scope="layer_norm"):
    with tf.variable_scope(scope):
        '''for clarification of shapes:
        input_tensor = [batch_size, num_neurons]
        mean = [batch_size]
        variance = [batch_size]
        alpha = [num_neurons]
        bias = [num_neurons]
        output = [batch_size, num_neurons]
        '''
        input_tensor_shape_list = input_tensor.get_shape().as_list()
        num_neurons = input_tensor_shape_list[1] // num_variables_in_tensor  # integer division

        alpha = tf.get_variable('layer_norm_alpha',
                                [num_neurons * num_variables_in_tensor],
                                initializer=tf.constant_initializer(1.0))
        bias = tf.get_variable('layer_norm_bias',
                               [num_neurons * num_variables_in_tensor],
                               initializer=tf.constant_initializer(initial_bias_value))

        if num_variables_in_tensor == 1:
            input_tensor_list = [input_tensor]
            alpha_list = [alpha]
            bias_list = [bias]
        else:
            input_tensor_list = tf.split(1, num_variables_in_tensor, input_tensor)
            alpha_list = tf.split(0, num_variables_in_tensor, alpha)
            bias_list = tf.split(0, num_variables_in_tensor, bias)

        list_of_layer_normed_results = []
        for counter in xrange(num_variables_in_tensor):
            mean, variance = moments_for_layer_norm(
                input_tensor_list[counter], axes=[1],
                name="moments_loopnum_" + str(counter) + scope)  # average across layer
            # moments_for_layer_norm is expected to return the epsilon-stabilized standard
            # deviation here, since the result is divided by it directly
            output = ((alpha_list[counter] * (input_tensor_list[counter] - mean)) / variance
                      + bias_list[counter])
            list_of_layer_normed_results.append(output)

        if num_variables_in_tensor == 1:
            return list_of_layer_normed_results[0]
        else:
            return tf.concat(1, list_of_layer_normed_results)
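
# A NumPy sketch of what layer_norm above computes, assuming moments_for_layer_norm
# returns the per-row mean and the epsilon-stabilized standard deviation (it is
# divided by directly, despite being named "variance"): each row is normalized
# across its neurons, then scaled by alpha and shifted by bias. With
# num_variables_in_tensor > 1, the same operation runs independently on each
# [batch_size, num_neurons] slice.
import numpy as np

def layer_norm_reference(x, alpha, bias, epsilon=1e-3):
    mean = x.mean(axis=1, keepdims=True)                               # [batch_size, 1]
    std = np.sqrt(((x - mean) ** 2).mean(axis=1, keepdims=True) + epsilon)
    return alpha * (x - mean) / std + bias                             # [batch_size, num_neurons]

x = np.random.randn(4, 8).astype(np.float32)
out = layer_norm_reference(x, alpha=np.ones(8, np.float32), bias=np.zeros(8, np.float32))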
def __call__(self, inputs, state, timestep=0, scope=None): """Normal Gated recurrent unit (GRU) with nunits cells.""" with tf.variable_scope(scope or type(self).__name__): # "GRUCell" with tf.variable_scope("Gates"): # Reset gate and update gate. # We start with bias of 1.0 to not reset and not udpate. r, u = tf.split( 1, 2, tf.sigmoid( multiplicative_integration([inputs, state], self._num_units * 2, 1.0))) with tf.variable_scope( "Candidate" ): # you need a different one because you're doing a new linear # notice they have the activation/non-linear step right here! c = tf.tanh( multiplicative_integration([inputs, state], self._num_units, 0.0)) if self.use_recurrent_dropout and self.is_training: input_contribution = tf.nn.dropout( c, self.recurrent_dropout_factor) else: input_contribution = c new_h = u * state + (1 - u) * input_contribution return new_h, new_h
def multiplicative_integration(list_of_inputs, output_size, initial_bias_value=0.0,
                               weights_already_calculated=False, use_highway_gate=False,
                               use_l2_loss=False, scope=None, timestep=0):
    '''Expects a list of exactly two inputs and combines them with multiplicative integration.

    weights_already_calculated: treat the two inputs as the pre-computed projections Wx and Uz;
    useful for batch-normed or layer-normed inputs.
    '''
    with tf.variable_scope(scope or 'double_inputs_multiple_integration'):
        if len(list_of_inputs) != 2:
            raise ValueError('list_of_inputs must contain exactly 2 tensors, you have: %d'
                             % len(list_of_inputs))

        if weights_already_calculated:
            # the projections were already computed (e.g. by batch norm / layer norm)
            Wx = list_of_inputs[0]
            Uz = list_of_inputs[1]
        else:
            with tf.variable_scope('Calculate_Wx_mulint'):
                Wx = linear.linear(list_of_inputs[0], output_size, False,
                                   use_l2_loss=use_l2_loss, timestep=timestep)
            with tf.variable_scope("Calculate_Uz_mulint"):
                Uz = linear.linear(list_of_inputs[1], output_size, False,
                                   use_l2_loss=use_l2_loss, timestep=timestep)

        with tf.variable_scope("multiplicative_integration"):
            alpha = tf.get_variable(
                'mulint_alpha', [output_size],
                initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.1))

            beta1, beta2 = tf.split(
                0, 2,
                tf.get_variable('mulint_params_betas', [output_size * 2],
                                initializer=tf.truncated_normal_initializer(mean=0.5, stddev=0.1)))

            original_bias = tf.get_variable(
                'mulint_original_bias', [output_size],
                initializer=tf.truncated_normal_initializer(mean=initial_bias_value, stddev=0.1))

        final_output = alpha * Wx * Uz + beta1 * Uz + beta2 * Wx + original_bias

        if use_highway_gate:
            final_output = highway_network.apply_highway_gate(final_output, list_of_inputs[0])

        return final_output
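
# A NumPy sketch of the combination multiplicative_integration computes, next to the
# additive combination it replaces (Wu et al., "On Multiplicative Integration with
# Recurrent Neural Networks"): the Hadamard term alpha * Wx * Uz gates the input and
# hidden projections against each other instead of simply summing them. Shapes and
# parameter values below are arbitrary illustration values.
import numpy as np

batch_size, output_size = 4, 8
Wx = np.random.randn(batch_size, output_size)   # projection of the input, W x_t
Uz = np.random.randn(batch_size, output_size)   # projection of the hidden state, U z_{t-1}
alpha = np.ones(output_size)
beta1 = np.full(output_size, 0.5)
beta2 = np.full(output_size, 0.5)
b = np.zeros(output_size)

additive = Wx + Uz + b                                   # vanilla pre-activation
mul_int = alpha * Wx * Uz + beta1 * Uz + beta2 * Wx + b  # multiplicative integration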
def __call__(self, inputs, state, timestep=0, scope=None): """Long short-term memory cell (LSTM). The idea with iteration would be to run different batch norm mean and variance stats on timestep greater than 10 """ with tf.variable_scope(scope or type(self).__name__): # "BasicLSTMCell" # Parameters of gates are concatenated into one multiply for efficiency. h, c = tf.split(1, 2, state) '''note that bias is set to 0 because batch norm bias is added later''' with tf.variable_scope('inputs_weight_matrix'): inputs_concat = linear([inputs], 4 * self._num_units, False) inputs_concat = layer_norm(inputs_concat, num_variables_in_tensor=4, scope="inputs_concat_layer_norm") with tf.variable_scope('state_weight_matrix'): h_concat = linear([h], 4 * self._num_units, False) h_concat = layer_norm(h_concat, num_variables_in_tensor=4, scope="h_concat_layer_norm") i, j, f, o = tf.split( 1, 4, multiplicative_integration([inputs_concat, h_concat], 4 * self._num_units, 0.0, weights_already_calculated=True)) new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid( i) * tf.tanh(j) '''apply layer norm to the hidden state transition''' with tf.variable_scope('layer_norm_hidden_state'): new_h = tf.tanh(layer_norm(new_c)) * tf.sigmoid(o) return new_h, tf.concat(1, [new_h, new_c]) # reversed this
def __call__(self, inputs, state, timestep=0, scope=None): """Normal Gated recurrent unit (GRU) with nunits cells.""" with tf.variable_scope(scope or type(self).__name__): # "GRUCell" with tf.variable_scope("Inputs"): inputs_concat = linear([inputs], self._num_units * 2, False, 1.0) inputs_concat = layer_norm(inputs_concat, num_variables_in_tensor=2, initial_bias_value=1.0) with tf.variable_scope("Hidden_State"): hidden_state_concat = linear([state], self._num_units * 2, False) hidden_state_concat = layer_norm(hidden_state_concat, num_variables_in_tensor=2) r, u = tf.split( 1, 2, tf.sigmoid( multiplicative_integration( [inputs_concat, hidden_state_concat], 2 * self._num_units, 1.0, weights_already_calculated=True))) with tf.variable_scope("Candidate"): with tf.variable_scope('input_portion'): input_portion = layer_norm( linear([inputs], self._num_units, False)) with tf.variable_scope('reset_portion'): reset_portion = r * layer_norm( linear([state], self._num_units, False)) c = tf.tanh( multiplicative_integration( [input_portion, reset_portion], self._num_units, 0.0, weights_already_calculated=True)) new_h = u * state + (1 - u) * c return new_h, new_h
def __call__(self, inputs, state, scope=None): """Gated recurrent unit (GRU) with nunits cells.""" with tf.variable_scope(scope or type(self).__name__): # "GRUCell" with tf.variable_scope("Gates"): # Reset gate and update gate. # We start with bias of 1.0 to not reset and not update. concated_r_u = layer_norm(linear([inputs, state], 2 * self._num_units, False, 1.0), num_variables_in_tensor=2, initial_bias_value=1.0) r, u = tf.split(1, 2, tf.sigmoid(concated_r_u)) with tf.variable_scope("Candidate"): with tf.variable_scope("reset_portion"): reset_portion = r * layer_norm(linear([state], self._num_units, False)) with tf.variable_scope("inputs_portion"): inputs_portion = layer_norm(linear([inputs], self._num_units, False)) c = tf.tanh(reset_portion + inputs_portion) new_h = u * state + (1 - u) * c return new_h, new_h