def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
        unitary_hidden_state, secondary_cell_hidden_state = tf.split(1, 2, state)

        mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size * 2])
        mat_out = tf.get_variable('mat_out', [self.state_size * 2, self.output_size])

        in_proj = tf.matmul(inputs, mat_in)
        in_proj_re, in_proj_im = tf.split(1, 2, in_proj)
        in_proj_c = tf.complex(in_proj_re, in_proj_im)
        out_state = modReLU(
            in_proj_c + ulinear(unitary_hidden_state, self.state_size),
            tf.get_variable(name='bias', dtype=tf.float32, shape=[self.state_size],
                            initializer=tf.constant_initializer(0.)),
            scope=scope)

        with tf.variable_scope('unitary_output'):
            # computes data linear, unitary linear and summation -- TODO: should be complex output
            # NOTE: the output_size argument is assumed; the original draft omitted it.
            unitary_linear_output_real = linear.linear(
                [tf.real(out_state), tf.imag(out_state), inputs], self.output_size, True, 0.0)

        with tf.variable_scope('scale_nonlinearity'):
            # NOTE: hidden_bias is assumed to be defined elsewhere in this draft.
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        # transition to data shortcut connection
        out_bias = tf.get_variable(name='out_bias', dtype=tf.float32, shape=[self.output_size],
                                   initializer=tf.constant_initializer(0.))
        out_ = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias

        # hidden state is complex but output is completely real
        return out_, out_state
def __call__(self, inputs, state, scope=None):
    zero_initer = tf.constant_initializer(0.)
    with tf.variable_scope(scope or type(self).__name__):
        # These two matrices project the real-valued input into the complex state space and
        # project the (real, imaginary) parts of the state back to a real output; the same
        # pattern could be extended to LSTM-style configurations.
        mat_in = tf.get_variable('W_in', [self.input_size, self.state_size * 2])
        mat_out = tf.get_variable('W_out', [self.state_size * 2, self.output_size])

        in_proj = tf.matmul(inputs, mat_in)
        in_proj_c = tf.complex(in_proj[:, :self.state_size], in_proj[:, self.state_size:])
        out_state = modrelu_c(
            in_proj_c + ulinear_c(state, transform=self.transform),
            tf.get_variable(name='B', dtype=tf.float32, shape=[self.state_size],
                            initializer=zero_initer))

        out_bias = tf.get_variable(name='B_out', dtype=tf.float32, shape=[self.output_size],
                                   initializer=zero_initer)
        out = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias

    return out, out_state
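# A minimal sketch of calling the cell above for a single timestep. The class name
# (UnitaryRNNCell) and its constructor arguments are assumptions for illustration only;
# the point is that the input and output are real while the carried state is complex.
batch_size, input_size, state_size, output_size = 16, 64, 128, 32
cell = UnitaryRNNCell(input_size=input_size, state_size=state_size, output_size=output_size)

x_t = tf.placeholder(tf.float32, [batch_size, input_size])
h_prev = tf.complex(tf.zeros([batch_size, state_size]),
                    tf.zeros([batch_size, state_size]))   # complex64 initial state
y_t, h_t = cell(x_t, h_prev)                              # y_t is real, h_t is complex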
def linear(args, output_size, bias, bias_start=0.0, use_l2_loss=False,
           use_weight_normalization=use_weight_normalization_default, scope=None,
           timestep=-1, weight_initializer=None, orthogonal_scale_factor=1.1):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

    Args:
      args: a 2D Tensor or a list of 2D, batch x n, Tensors.
      output_size: int, second dimension of W[i].
      bias: boolean, whether to add a bias term or not.
      bias_start: starting value to initialize the bias; 0 by default.
      scope: VariableScope for the created subgraph; defaults to "Linear".

    Returns:
      A 2D Tensor with shape [batch x output_size] equal to
      sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

    Raises:
      ValueError: if some of the arguments has unspecified or wrong shape.
    """
    # assert args  # was causing an error in newer TensorFlow versions
    if not isinstance(args, (list, tuple)):
        args = [args]

    if len(args) > 1 and use_weight_normalization:
        raise ValueError('weight_normalization cannot be used with multiple inputs '
                         'because the euclidean norm would be incorrect -- '
                         'consider using multiplicative integration instead')

    # Calculate the total size of arguments on dimension 1.
    total_arg_size = 0
    shapes = [a.get_shape().as_list() for a in args]
    for shape in shapes:
        if len(shape) != 2:
            raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
        if not shape[1]:
            raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
        else:
            total_arg_size += shape[1]

    if use_l2_loss:
        l_regularizer = tf.contrib.layers.l2_regularizer(1e-5)
    else:
        l_regularizer = None

    # Now the computation.
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [total_arg_size, output_size],
                                 initializer=tf.uniform_unit_scaling_initializer(),
                                 regularizer=l_regularizer)
        if use_weight_normalization:
            matrix = weight_normalization(matrix, timestep=timestep)

        if len(args) == 1:
            res = tf.matmul(args[0], matrix)
        else:
            res = tf.matmul(tf.concat(1, args), matrix)
        if not bias:
            return res

        bias_term = tf.get_variable("Bias", [output_size],
                                    initializer=tf.constant_initializer(bias_start),
                                    regularizer=l_regularizer)
    return res + bias_term
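# Minimal usage sketch for linear() above; shapes are arbitrary. When given a list of
# tensors, they are concatenated along dimension 1 and share a single weight matrix.
x = tf.placeholder(tf.float32, [32, 100])
h = tf.placeholder(tf.float32, [32, 200])
with tf.variable_scope('linear_example'):
    y = linear([x, h], output_size=50, bias=True)   # -> [32, 50], uses one [300, 50] matrix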
def multiplicative_integration(list_of_inputs, output_size, initial_bias_value=0.0,
                               weights_already_calculated=False, use_highway_gate=False,
                               use_l2_loss=False, scope=None, timestep=0):
    """Performs multiplicative integration of exactly two inputs.

    If weights_already_calculated is True, the two inputs are treated as the
    pre-computed projections Wx and Uz (useful for batch-normalized inputs).
    """
    with tf.variable_scope(scope or 'double_inputs_multiple_integration'):
        if len(list_of_inputs) != 2:
            raise ValueError('list of inputs must be 2, you have:', len(list_of_inputs))

        if weights_already_calculated:
            # The caller already projected the inputs (e.g. for batch norm).
            Wx = list_of_inputs[0]
            Uz = list_of_inputs[1]
        else:
            with tf.variable_scope('Calculate_Wx_mulint'):
                Wx = linear.linear(list_of_inputs[0], output_size, False,
                                   use_l2_loss=use_l2_loss, timestep=timestep)
            with tf.variable_scope("Calculate_Uz_mulint"):
                Uz = linear.linear(list_of_inputs[1], output_size, False,
                                   use_l2_loss=use_l2_loss, timestep=timestep)

        with tf.variable_scope("multiplicative_integration"):
            alpha = tf.get_variable(
                'mulint_alpha', [output_size],
                initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.1))
            beta1, beta2 = tf.split(
                0, 2, tf.get_variable('mulint_params_betas', [output_size * 2],
                                      initializer=tf.truncated_normal_initializer(mean=0.5, stddev=0.1)))
            original_bias = tf.get_variable(
                'mulint_original_bias', [output_size],
                initializer=tf.truncated_normal_initializer(mean=initial_bias_value, stddev=0.1))

        final_output = alpha * Wx * Uz + beta1 * Uz + beta2 * Wx + original_bias
        if use_highway_gate:
            final_output = highway_network.apply_highway_gate(final_output, list_of_inputs[0])

    return final_output
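# Usage sketch: multiplicative integration replaces the usual additive combination
# Wx + Uz with alpha*Wx*Uz + beta1*Uz + beta2*Wx + b, so the two streams gate each other.
# Shapes below are arbitrary.
x_t = tf.placeholder(tf.float32, [32, 100])     # current input
h_prev = tf.placeholder(tf.float32, [32, 128])  # previous hidden state
with tf.variable_scope('mulint_example'):
    h_t = tf.tanh(multiplicative_integration([x_t, h_prev], output_size=128))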
def layer_norm(input_tensor, num_variables_in_tensor=1, initial_bias_value=0.0, scope="layer_norm"):
    with tf.variable_scope(scope):
        '''Shapes for reference:
          input_tensor = [batch_size, num_neurons]
          mean         = [batch_size]
          variance     = [batch_size]
          alpha        = [num_neurons]
          bias         = [num_neurons]
          output       = [batch_size, num_neurons]
        '''
        input_tensor_shape_list = input_tensor.get_shape().as_list()
        num_neurons = input_tensor_shape_list[1] / num_variables_in_tensor

        alpha = tf.get_variable('layer_norm_alpha', [num_neurons * num_variables_in_tensor],
                                initializer=tf.constant_initializer(1.0))
        bias = tf.get_variable('layer_norm_bias', [num_neurons * num_variables_in_tensor],
                               initializer=tf.constant_initializer(initial_bias_value))

        if num_variables_in_tensor == 1:
            input_tensor_list = [input_tensor]
            alpha_list = [alpha]
            bias_list = [bias]
        else:
            input_tensor_list = tf.split(1, num_variables_in_tensor, input_tensor)
            alpha_list = tf.split(0, num_variables_in_tensor, alpha)
            bias_list = tf.split(0, num_variables_in_tensor, bias)

        list_of_layer_normed_results = []
        for counter in xrange(num_variables_in_tensor):
            mean, variance = moments_for_layer_norm(
                input_tensor_list[counter], axes=[1],
                name="moments_loopnum_" + str(counter) + scope)  # average across layer
            output = (alpha_list[counter] * (input_tensor_list[counter] - mean)) / variance \
                + bias_list[counter]
            list_of_layer_normed_results.append(output)

        if num_variables_in_tensor == 1:
            return list_of_layer_normed_results[0]
        else:
            return tf.concat(1, list_of_layer_normed_results)
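# Usage sketch for layer_norm() above; shapes are arbitrary. With num_variables_in_tensor > 1,
# a tensor holding several gates stacked along dimension 1 is normalized per gate rather than
# over the whole row.
h = tf.placeholder(tf.float32, [32, 256])
with tf.variable_scope('ln_example'):
    h_normed = layer_norm(h)                                      # [32, 256]

gates = tf.placeholder(tf.float32, [32, 4 * 256])                 # e.g. 4 stacked LSTM gates
with tf.variable_scope('ln_gates_example'):
    gates_normed = layer_norm(gates, num_variables_in_tensor=4)   # each 256-wide slice normed separately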
def batch_timesteps_linear(input, output_size, bias, bias_start=0.0, use_l2_loss=False,
                           use_weight_normalization=use_weight_normalization_default,
                           scope=None, transpose_input=True, timestep=-1):
    """Linear map applied at every timestep: sum_i(args[i] * W[i]), where W[i] is a variable.

    Args:
      input: a 3D Tensor [batch_size, timesteps, input_size] (or
        [timesteps, batch_size, input_size] if transpose_input is False).
      output_size: int, second dimension of W[i].
      bias: boolean, whether to add a bias term or not.
      bias_start: starting value to initialize the bias; 0 by default.
      scope: VariableScope for the created subgraph; defaults to "Linear".

    Returns:
      A 3D Tensor whose last dimension is output_size, equal to
      sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

    Raises:
      ValueError: if some of the arguments has unspecified or wrong shape.
    """
    # Calculate the total size of arguments on dimension 2.
    if transpose_input:
        input = tf.transpose(input, [1, 0, 2])

    shape_list = input.get_shape().as_list()
    if len(shape_list) != 3:
        raise ValueError('shape must be of size 3, you have inputted shape size of:', len(shape_list))

    num_timesteps = shape_list[0]
    batch_size = shape_list[1]
    total_arg_size = shape_list[2]

    if use_l2_loss:
        l_regularizer = tf.contrib.layers.l2_regularizer(1e-5)
    else:
        l_regularizer = None

    # Now the computation.
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [total_arg_size, output_size],
                                 initializer=tf.uniform_unit_scaling_initializer(),
                                 regularizer=l_regularizer)
        if use_weight_normalization:
            matrix = weight_normalization(matrix)
        # Tile the weight matrix across timesteps so tf.batch_matmul can be used.
        matrix = tf.tile(tf.expand_dims(matrix, 0), [num_timesteps, 1, 1])

        res = tf.batch_matmul(input, matrix)

        if bias:
            bias_term = tf.get_variable("Bias", [output_size],
                                        initializer=tf.constant_initializer(bias_start))
            res = res + bias_term

    if transpose_input:
        res = tf.transpose(res, [1, 0, 2])

    return res
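# Usage sketch for batch_timesteps_linear() above: the same [input_size, output_size]
# weight matrix is applied at every timestep via tf.batch_matmul. Shapes are arbitrary;
# with transpose_input=True (the default) the input is batch-major.
seq = tf.placeholder(tf.float32, [32, 20, 100])   # [batch, timesteps, input_size]
with tf.variable_scope('timestep_projection'):
    projected = batch_timesteps_linear(seq, output_size=64, bias=True)   # -> [32, 20, 64]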
# tf.random_shuffle(value, seed=None, name=None)
# Randomly shuffles value along its first dimension.
"""
a = [[1, 2], [3, 4], [5, 6]]
print("randomly shuffled along the first dimension:", sess.run(tf.random_shuffle(a)))
"""

# tf.argmax | tf.argmin
# tf.argmax(input=tensor, dimension=axis)
# Returns the positions of the largest/smallest values of the tensor along the given axis.
"""
a = tf.get_variable(name="a", shape=[3, 4], dtype=tf.float32,
                    initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
b = tf.argmax(input=a, dimension=0)
c = tf.argmax(input=a, dimension=1)
sess.run(tf.global_variables_initializer())
print("initialized matrix:", sess.run(a))
print("positions of the maxima along dimension 0:", sess.run(b))
print("positions of the maxima along dimension 1:", sess.run(c))
"""

# tf.equal
# tf.equal(x, y, name=None): element-wise test of whether x and y are equal; returns a bool tensor.
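# A small tf.equal example in the same style as the notes above (added illustration; values arbitrary):
"""
a = tf.constant([1, 2, 3, 4])
b = tf.constant([1, 0, 3, 0])
print("element-wise equality:", sess.run(tf.equal(a, b)))  # [ True False  True False]
"""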
def __init__(self, is_training, config):
    self.batch_size = batch_size = config.batch_size
    self.num_steps = num_steps = config.num_steps
    size = config.hidden_size
    vocab_size = config.vocab_size

    self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
    self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])

    # Alternative cells that can be swapped in:
    # rnn_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=1.0, state_is_tuple=True)
    # rnn_cell = rnn_cell_modern.HighwayRNNCell(size)
    # rnn_cell = rnn_cell_modern.JZS1Cell(size)
    # rnn_cell = rnn_cell_mulint_modern.BasicRNNCell_MulInt(size)
    # rnn_cell = rnn_cell_mulint_modern.GRUCell_MulInt(size)
    # rnn_cell = rnn_cell_mulint_modern.BasicLSTMCell_MulInt(size)
    # rnn_cell = rnn_cell_mulint_modern.HighwayRNNCell_MulInt(size)
    # rnn_cell = rnn_cell_mulint_layernorm_modern.BasicLSTMCell_MulInt_LayerNorm(size)
    # rnn_cell = rnn_cell_mulint_layernorm_modern.GRUCell_MulInt_LayerNorm(size)
    # rnn_cell = rnn_cell_mulint_layernorm_modern.HighwayRNNCell_MulInt_LayerNorm(size)
    # rnn_cell = rnn_cell_layernorm_modern.BasicLSTMCell_LayerNorm(size)
    # rnn_cell = rnn_cell_layernorm_modern.GRUCell_LayerNorm(size)
    # rnn_cell = rnn_cell_layernorm_modern.HighwayRNNCell_LayerNorm(size)
    # rnn_cell = rnn_cell_modern.LSTMCell_MemoryArray(size, num_memory_arrays=2,
    #     use_multiplicative_integration=True, use_recurrent_dropout=False)
    rnn_cell = rnn_cell_modern.MGUCell(size, use_multiplicative_integration=True,
                                       use_recurrent_dropout=False)

    if is_training and config.keep_prob < 1:
        rnn_cell = tf.nn.rnn_cell.DropoutWrapper(rnn_cell, output_keep_prob=config.keep_prob)
    cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell] * config.num_layers, state_is_tuple=True)

    self._initial_state = cell.zero_state(batch_size, tf.float32)

    with tf.device("/cpu:0"):
        embedding = tf.get_variable("embedding", [vocab_size, size])
        inputs = tf.nn.embedding_lookup(embedding, self._input_data)

    if is_training and config.keep_prob < 1:
        inputs = tf.nn.dropout(inputs, config.keep_prob)

    # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
    # This builds an unrolled LSTM for tutorial purposes only.
    # In general, use the rnn() or state_saving_rnn() from rnn.py.
    #
    # The alternative version of the code below is:
    #
    # from tensorflow.models.rnn import rnn
    # inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, num_steps, inputs)]
    # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
    outputs = []
    state = self._initial_state
    with tf.variable_scope("RNN"):
        for time_step in range(num_steps):
            if time_step > 0:
                tf.get_variable_scope().reuse_variables()
            (cell_output, state) = cell(inputs[:, time_step, :], state)
            outputs.append(cell_output)

    output = tf.reshape(tf.concat(1, outputs), [-1, size])

    softmax_w = tf.transpose(embedding)  # weight tying
    softmax_b = tf.get_variable("softmax_b", [vocab_size])
    logits = tf.matmul(output, softmax_w) + softmax_b
    loss = tf.nn.seq2seq.sequence_loss_by_example(
        [logits],
        [tf.reshape(self._targets, [-1])],
        [tf.ones([batch_size * num_steps])])
    self._cost = cost = tf.reduce_sum(loss) / batch_size
    self._final_state = state

    if not is_training:
        return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)

    # optimizer = tf.train.GradientDescentOptimizer(self.lr)
    optimizer = tf.train.AdamOptimizer(self.lr)

    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
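# Hypothetical one-step training sketch for the model above. The class name ("PTBModel"),
# the config object, and the random batches are assumptions for illustration only.
import numpy as np

m = PTBModel(is_training=True, config=config)
x = np.random.randint(0, config.vocab_size, size=(config.batch_size, config.num_steps))
y = np.random.randint(0, config.vocab_size, size=(config.batch_size, config.num_steps))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    cost, _ = sess.run([m._cost, m._train_op],
                       feed_dict={m._input_data: x, m._targets: y})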
def get_unit_variable_c(name, scope, shape):
    """Returns a unit-modulus complex variable exp(i * theta), with theta ~ U(-pi, pi)."""
    theta = tf.get_variable(name, shape=shape,
                            initializer=tf.random_uniform_initializer(-pi, pi))
    return tf.complex(tf.cos(theta), tf.sin(theta))
def get_variable_c(name, shape, initializer=None):
    """Returns a complex variable backed by separate real and imaginary variables."""
    re = tf.get_variable(name + '_re', shape=shape, initializer=initializer)
    im = tf.get_variable(name + '_im', shape=shape, initializer=initializer)
    return tf.complex(re, im, name=name)
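# Minimal usage sketch for the two complex-variable helpers above; shapes are arbitrary.
with tf.variable_scope('complex_params'):
    # Unit-modulus parameters exp(i*theta), useful for building unitary transforms.
    phases = get_unit_variable_c('phases', 'complex_params', shape=[128])
    # A general complex parameter built from separate real and imaginary parts.
    W_c = get_variable_c('W', shape=[128, 128],
                         initializer=tf.random_uniform_initializer(-0.01, 0.01))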