Example #1
    def __call__(self, inputs, state, timestep=0, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            # Forget gate bias is initialized to 1.0 via forget_bias_initialization.
            with tf.variable_scope("Gates"):
                if self.use_multiplicative_integration:
                    gated_factor = multiplicative_integration(
                        [inputs, state], self._num_units,
                        self.forget_bias_initialization)
                else:
                    gated_factor = linear([inputs, state], self._num_units,
                                          True,
                                          self.forget_bias_initialization)

                gated_factor = tf.sigmoid(gated_factor)

            with tf.variable_scope("Candidate"):
                c = tf.tanh(linear([inputs], self._num_units, True, 0.0))

                if self.use_recurrent_dropout and self.is_training:
                    input_contribution = tf.nn.dropout(
                        c, self.recurrent_dropout_factor)
                else:
                    input_contribution = c

            new_h = (1 - gated_factor) * state + gated_factor * input_contribution

        return new_h, new_h
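This cell's update is a per-unit convex combination of the previous state and the candidate: new_h = (1 - g) * state + g * candidate. A minimal NumPy sketch of just that arithmetic (the shapes and the sigmoid helper are illustrative assumptions, not part of the original cell):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch, units = 2, 4
state = np.random.randn(batch, units)
gate_logits = np.random.randn(batch, units) + 1.0   # gate bias initialized near 1.0, as above
candidate = np.tanh(np.random.randn(batch, units))  # stands in for the "Candidate" scope

g = sigmoid(gate_logits)
new_h = (1 - g) * state + g * candidate  # convex combination per unit
assert new_h.shape == (batch, units)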
Example #2
    def __call__(self, inputs, state, timestep=0, scope=None):
        current_state = state
        for highway_layer in range(self.num_highway_layers):
            with tf.variable_scope('highway_factor_' + str(highway_layer)):
                if self.use_inputs_on_each_layer or highway_layer == 0:
                    highway_factor = tf.tanh(
                        linear([inputs, current_state], self._num_units, True))
                else:
                    highway_factor = tf.tanh(
                        linear([current_state], self._num_units, True))
            with tf.variable_scope('gate_for_highway_factor_' +
                                   str(highway_layer)):
                if self.use_inputs_on_each_layer or highway_layer == 0:
                    gate_for_highway_factor = tf.sigmoid(
                        linear([inputs, current_state], self._num_units, True,
                               -3.0))
                else:
                    gate_for_highway_factor = tf.sigmoid(
                        linear([current_state], self._num_units, True, -3.0))

                gate_for_hidden_factor = 1.0 - gate_for_highway_factor

            current_state = (highway_factor * gate_for_highway_factor +
                             current_state * gate_for_hidden_factor)

        return current_state, current_state
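The -3.0 gate bias keeps the highway gate nearly closed at initialization, so each layer starts out close to an identity map over current_state. A quick check of that arithmetic in plain Python:

import math

g = 1.0 / (1.0 + math.exp(3.0))  # sigmoid(-3.0)
print(g, 1.0 - g)                # ~0.047 and ~0.953: the state initially passes through almost unchanged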
Example #3
    def __call__(self, inputs, state, scope=None):
        with tf.device("/gpu:" + str(self._gpu_for_layer)):
            """JZS2, mutant 2 with n units cells."""
            with tf.variable_scope(scope or type(self).__name__):  # "JZS1Cell"
                with tf.variable_scope(
                        "Zinput"):  # Reset gate and update gate.
                    '''equation 1'''

                    z = tf.sigmoid(
                        linear([inputs, state],
                               self._num_units,
                               True,
                               1.0,
                               weight_initializer=self._weight_initializer,
                               orthogonal_scale_factor=self._orthogonal_scale_factor))
                with tf.variable_scope("Rinput"):
                    '''equation 2'''
                    r = tf.sigmoid(inputs + linear(
                        [state],
                        self._num_units,
                        True,
                        1.0,
                        weight_initializer=self._weight_initializer,
                        orthogonal_scale_factor=self._orthogonal_scale_factor))

                with tf.variable_scope("Candidate"):
                    '''equation 3'''
                    component_0 = linear([state * r, inputs], self._num_units,
                                         True)

                    component_2 = tf.tanh(component_0) * z
                    component_3 = state * (1 - z)

                h_t = component_2 + component_3

            return h_t, h_t  # There is only one hidden state to keep track of.
Example #4
    def __call__(self, inputs, state, scope=None):
        with tf.device("/gpu:" + str(self._gpu_for_layer)):
            """JZS1, mutant 1 with n units cells."""
            with tf.variable_scope(scope or type(self).__name__):  # "JZS1Cell"
                with tf.variable_scope(
                        "Zinput"):  # Reset gate and update gate.
                    # We start with bias of 1.0 to not reset and not update.
                    '''equation 1: z = sigm(W_xz * x_t + b_z), where x_t is inputs'''

                    z = tf.sigmoid(
                        linear([inputs],
                               self._num_units,
                               True,
                               1.0,
                               weight_initializer=self._weight_initializer,
                               orthogonal_scale_factor=self._orthogonal_scale_factor))

                with tf.variable_scope("Rinput"):
                    '''equation 2: r = sigm(W_xr * x_t + W_hr * h_t + b_r), where h_t is the previous state'''

                    r = tf.sigmoid(
                        linear([inputs, state],
                               self._num_units,
                               True,
                               1.0,
                               weight_initializer=self._weight_initializer,
                               orthogonal_scale_factor=self._orthogonal_scale_factor))

                with tf.variable_scope("Candidate"):
                    '''equation 3'''
                    component_0 = linear([r * state], self._num_units, True)
                    component_1 = tf.tanh(tf.tanh(inputs) + component_0)
                    component_2 = component_1 * z
                    component_3 = state * (1 - z)

                h_t = component_2 + component_3

            return h_t, h_t  # There is only one hidden state to keep track of.
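Reading the three labeled equations together, this mutant (apparently JZS1 from Jozefowicz, Zaremba & Sutskever, 2015) computes z = sigm(W_xz x_t + b_z), r = sigm(W_xr x_t + W_hr h_t + b_r), and h_next = tanh(tanh(x_t) + W_c (r * h_t)) * z + h_t * (1 - z). A NumPy sketch of one forward step, with the weight names, shapes, and bias handling as illustrative assumptions:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

units = 4
x_t = np.random.randn(units)  # inputs, assumed already projected to `units`
h_t = np.random.randn(units)  # previous hidden state
W_xz, W_xr, W_hr, W_c = (np.random.randn(units, units) for _ in range(4))

z = sigmoid(x_t @ W_xz + 1.0)               # equation 1, bias initialized to 1.0
r = sigmoid(x_t @ W_xr + h_t @ W_hr + 1.0)  # equation 2
candidate = np.tanh(np.tanh(x_t) + (r * h_t) @ W_c)  # equation 3
h_next = candidate * z + h_t * (1.0 - z)    # matches component_2 + component_3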
Example #5
    def __call__(self, inputs, state, timestep=0, scope=None):
        """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""

        current_state = state
        for highway_layer in range(self.num_highway_layers):
            with tf.variable_scope('highway_factor_' + str(highway_layer)):
                if self.use_inputs_on_each_layer or highway_layer == 0:
                    highway_factor = tf.tanh(
                        multiplicative_integration([inputs, current_state],
                                                   self._num_units))
                else:
                    highway_factor = tf.tanh(
                        linear([current_state], self._num_units, True))

            with tf.variable_scope('gate_for_highway_factor_' +
                                   str(highway_layer)):
                if self.use_inputs_on_each_layer or highway_layer == 0:
                    gate_for_highway_factor = tf.sigmoid(
                        multiplicative_integration([inputs, current_state],
                                                   self._num_units,
                                                   initial_bias_value=-3.0))
                else:
                    gate_for_highway_factor = tf.sigmoid(
                        linear([current_state], self._num_units, True, -3.0))

                gate_for_hidden_factor = 1 - gate_for_highway_factor

                if self.use_recurrent_dropout and self.is_training:
                    highway_factor = tf.nn.dropout(
                        highway_factor, self.recurrent_dropout_factor)

            current_state = (highway_factor * gate_for_highway_factor +
                             current_state * gate_for_hidden_factor)

        return current_state, current_state
Example #6
def multiplicative_integration(list_of_inputs,
                               output_size,
                               initial_bias_value=0.0,
                               weights_already_calculated=False,
                               use_highway_gate=False,
                               use_l2_loss=False,
                               scope=None,
                               timestep=0):
    '''Expects a list of exactly two inputs and performs multiplicative integration.

    If weights_already_calculated is True, the two inputs are treated as the
    precomputed projections Wx and Uz, which is useful for batch-normed inputs.
    '''
    with tf.variable_scope(scope or 'double_inputs_multiple_integration'):
        if len(list_of_inputs) != 2:
            raise ValueError('list_of_inputs must have exactly 2 elements, got %d' %
                             len(list_of_inputs))

        if weights_already_calculated:  # Precomputed Wx and Uz, e.g. from batch norm.
            Wx = list_of_inputs[0]
            Uz = list_of_inputs[1]

        else:
            with tf.variable_scope('Calculate_Wx_mulint'):
                Wx = linear.linear(list_of_inputs[0],
                                   output_size,
                                   False,
                                   use_l2_loss=use_l2_loss,
                                   timestep=timestep)
            with tf.variable_scope("Calculate_Uz_mulint"):
                Uz = linear.linear(list_of_inputs[1],
                                   output_size,
                                   False,
                                   use_l2_loss=use_l2_loss,
                                   timestep=timestep)

        with tf.variable_scope("multiplicative_integration"):
            alpha = tf.get_variable(
                'mulint_alpha', [output_size],
                initializer=tf.truncated_normal_initializer(mean=1.0,
                                                            stddev=0.1))

            beta1, beta2 = tf.split(
                axis=0,
                num_or_size_splits=2,
                value=tf.get_variable(
                    'mulint_params_betas', [output_size * 2],
                    initializer=tf.truncated_normal_initializer(mean=0.5,
                                                                stddev=0.1)))

            original_bias = tf.get_variable(
                'mulint_original_bias', [output_size],
                initializer=tf.truncated_normal_initializer(
                    mean=initial_bias_value, stddev=0.1))

        final_output = alpha * Wx * Uz + beta1 * Uz + beta2 * Wx + original_bias

        if use_highway_gate:
            final_output = highway_network.apply_highway_gate(
                final_output, list_of_inputs[0])
    return final_output
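The returned expression is the multiplicative-integration combination alpha * Wx * Uz + beta1 * Uz + beta2 * Wx + bias, i.e. a gated second-order term plus the two usual first-order terms. A NumPy sketch of just that combination, with shapes and values mirroring the initializer means above (illustrative assumptions, not trained values):

import numpy as np

batch, output_size = 2, 4
Wx = np.random.randn(batch, output_size)   # stands in for the Wx projection
Uz = np.random.randn(batch, output_size)   # stands in for the Uz projection
alpha = np.ones(output_size)               # mulint_alpha is drawn around 1.0
beta1 = np.full(output_size, 0.5)          # the betas are drawn around 0.5
beta2 = np.full(output_size, 0.5)
bias = np.zeros(output_size)               # initial_bias_value defaults to 0.0

final_output = alpha * Wx * Uz + beta1 * Uz + beta2 * Wx + bias
assert final_output.shape == (batch, output_size)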
Example #7
    def __call__(self, inputs, state, timestep=0, scope=None):
        with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            hidden_state_plus_c_list = tf.split(
                axis=1,
                num_or_size_splits=self.num_memory_arrays + 1,
                value=state)

            h = hidden_state_plus_c_list[0]
            c_list = hidden_state_plus_c_list[1:]
            '''very large matrix multiplication to speed up procedure -- will split variables out later'''

            if self.use_multiplicative_integration:
                concat = multiplicative_integration(
                    [inputs, h], self._num_units * 4 * self.num_memory_arrays,
                    0.0)
            else:
                concat = linear([inputs, h],
                                self._num_units * 4 * self.num_memory_arrays,
                                True)

            if self.use_layer_normalization:
                concat = layer_norm(
                    concat, num_variables_in_tensor=4 * self.num_memory_arrays)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            # -- the gates come in sets of four, one set per memory array.
            all_vars_list = tf.split(
                axis=1, num_or_size_splits=4 * self.num_memory_arrays, value=concat)
            '''memory array loop'''
            new_c_list, new_h_list = [], []
            for array_counter in range(self.num_memory_arrays):

                i = all_vars_list[0 + array_counter * 4]
                j = all_vars_list[1 + array_counter * 4]
                f = all_vars_list[2 + array_counter * 4]
                o = all_vars_list[3 + array_counter * 4]

                if self.use_recurrent_dropout and self.is_training:
                    input_contribution = tf.nn.dropout(
                        tf.tanh(j), self.recurrent_dropout_factor)
                else:
                    input_contribution = tf.tanh(j)

                new_c_list.append(c_list[array_counter] *
                                  tf.sigmoid(f + self._forget_bias) +
                                  tf.sigmoid(i) * input_contribution)

                if self.use_layer_normalization:
                    new_c = layer_norm(new_c_list[-1])
                else:
                    new_c = new_c_list[-1]

                new_h_list.append(tf.tanh(new_c) * tf.sigmoid(o))
            '''sum all new_h components -- could instead do a mean -- but investigate that later'''
            new_h = tf.add_n(new_h_list)

        return new_h, tf.concat(
            axis=1,
            values=[new_h] + new_c_list)  # Purposely reversed: h is packed first, then the c arrays.
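The second return value packs the single hidden state and the num_memory_arrays cell states into one tensor along axis 1; the tf.split at the top of __call__ is its exact inverse. A NumPy sketch of that round trip (batch and unit sizes are illustrative assumptions):

import numpy as np

batch, num_units, num_memory_arrays = 2, 4, 3
h = np.random.randn(batch, num_units)
c_list = [np.random.randn(batch, num_units) for _ in range(num_memory_arrays)]

state = np.concatenate([h] + c_list, axis=1)             # as returned above
parts = np.split(state, num_memory_arrays + 1, axis=1)   # as split on entry
assert np.allclose(parts[0], h)
assert all(np.allclose(p, c) for p, c in zip(parts[1:], c_list))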