Example #1
    def call(self, inputs, state):
        """Long short-term memory cell (LSTM)."""
        c, h = state

        ### your code here ###

        with tf.variable_scope('i'):
            # input gate
            i = tf.sigmoid(_linear([inputs, h], self._num_units, True))

        with tf.variable_scope('f'):
            # forget gate
            f = tf.sigmoid(_linear([inputs, h], self._num_units, True))

        with tf.variable_scope('o'):
            # output gate
            o = tf.sigmoid(_linear([inputs, h], self._num_units, True))

        with tf.variable_scope('g'):
            # candidate cell state
            g = self._activation(_linear([inputs, h], self._num_units, True))

        new_c = f * c + i * g
        new_h = o * self._activation(new_c)

        ######################
        new_state = LSTMStateTuple(new_c, new_h)

        return new_h, new_state
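A minimal usage sketch for a cell like the one above (TF 1.x; BasicLSTMCell stands in here, since the exercise cell is a reimplementation of it and would be a drop-in replacement):

import tensorflow as tf

# the custom cell from Example #1 mirrors BasicLSTMCell, so either works here
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=64)
x = tf.placeholder(tf.float32, [None, 20, 8])  # [batch, time, features]
outputs, final_state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
# outputs: [batch, time, 64]; final_state: LSTMStateTuple(c, h)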
Example #2
    def call(self, inputs, state):
        """Long short-term memory cell (LSTM)."""
        print(inputs)
        print(state)
        # h: previous hidden state, c: previous cell state
        c, h = state

        ### your code here ###
        with tf.variable_scope("i"):
            i = tf.sigmoid(_linear([inputs, h], self._num_units, True))
        with tf.variable_scope("f"):
            f = tf.sigmoid(_linear([inputs, h], self._num_units, True))
        with tf.variable_scope("o"):
            o = tf.sigmoid(_linear([inputs, h], self._num_units, True))
        with tf.variable_scope("g"):
            g = self._activation(_linear([inputs, h], self._num_units, True))
            #g = tf.sigmoid(_linear([inputs, h], self._num_units, True))

        new_c = f * c + i * g
        new_h = o * self._activation(new_c)

        ######################
        new_state = LSTMStateTuple(new_c, new_h)

        # a TensorFlow cell must return two values: the output (hidden state)
        # and the next state (cell + hidden)
        return new_h, new_state
Example #3
    def call(self, inputs, state):
        ### your code here ###
        print('### BasicRNNCell call')
        # vanilla RNN: output = activation(W [inputs, state] + b);
        # the output doubles as the next state
        output = self._activation(
            _linear([inputs, state], self._num_units, True))
        #####################
        return output, output
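Examples #1-#5 and #8 all rely on the private TF 1.x helper _linear(args, output_size, bias) from tensorflow.python.ops.rnn_cell_impl, which concatenates its arguments and applies a single affine map. Roughly, a sketch of what it computes:

import tensorflow as tf

def _linear(args, output_size, bias):
    # concatenate all inputs along the feature axis, then apply one
    # shared weight matrix, plus an optional bias
    total_input_size = sum(int(a.get_shape()[1]) for a in args)
    weights = tf.get_variable('kernel', [total_input_size, output_size])
    output = tf.matmul(tf.concat(args, axis=1), weights)
    if bias:
        output += tf.get_variable('bias', [output_size],
                                  initializer=tf.zeros_initializer())
    return output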
Example #4
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):
            c, h = state

            # change bias argument to False since LN will add bias via shift
            concat = utils._linear([inputs, h], 4 * self._num_units,
                                   False)  # tf.nn.rnn_cell._linear

            # i: input gate, j: candidate input, f: forget gate, o: output gate
            i, j, f, o = tf.split(concat, 4, axis=1)

            # add layer normalization to each gate
            i = ln(i, scope='i/')
            j = ln(j, scope='j/')
            f = ln(f, scope='f/')
            o = ln(o, scope='o/')

            new_c = (c * tf.nn.sigmoid(f + self._forget_bias) +
                     tf.nn.sigmoid(i) * self._activation(j))

            # add layer_normalization in calculation of new hidden state
            new_h = self._activation(ln(new_c,
                                        scope='new_h/')) * tf.nn.sigmoid(o)
            new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)

            return new_h, new_state
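Example #4 calls an ln helper that is not shown. A minimal layer-normalization sketch, assuming ln normalizes over the feature axis and applies a learned gain and shift (the standard formulation the LN-LSTM expects):

import tensorflow as tf

def ln(x, scope, epsilon=1e-5):
    # layer normalization: per-example zero mean / unit variance over
    # the feature axis, followed by a learned elementwise gain and shift
    with tf.variable_scope(scope):
        mean, variance = tf.nn.moments(x, [1], keep_dims=True)
        gain = tf.get_variable('gain', shape=[x.get_shape()[1]],
                               initializer=tf.constant_initializer(1.0))
        shift = tf.get_variable('shift', shape=[x.get_shape()[1]],
                                initializer=tf.constant_initializer(0.0))
        return gain * (x - mean) / tf.sqrt(variance + epsilon) + shift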
Example #5
    def call(self, inputs, state):
        """Gated recurrent unit (GRU) with nunits cells."""
        print(self)
        print(inputs)
        print(state)
        ### your code here ###
        with tf.variable_scope("r"):
            # reset gate
            r = tf.sigmoid(_linear([inputs, state], self._num_units, True))
        with tf.variable_scope("z"):
            # update gate
            z = tf.sigmoid(_linear([inputs, state], self._num_units, True))

        # candidate state, built from the reset-gated previous state
        c = self._activation(
            _linear([inputs, r * state], self._num_units, True))
        # blend previous state and candidate (TF GRUCell convention)
        new_h = z * state + (1 - z) * c

        #####################
        return new_h, new_h
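For reference, the update this example implements, written out in the convention of TF's GRUCell (z gates how much of the previous state is carried over):

# r     = sigmoid(W_r [x, h] + b_r)    reset gate
# z     = sigmoid(W_z [x, h] + b_z)    update gate
# c     = act(W_c [x, r * h] + b_c)    candidate state
# new_h = z * h + (1 - z) * c          blend of old state and candidate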
Example #6
    def ReLULayer(self, name, n_in, n_out, inputs):
        # fully connected layer followed by a ReLU
        output = uti._linear(name + '.Linear',
                             n_in,
                             n_out,
                             inputs,
                             initialization='he')
        output = tf.nn.relu(output)
        return output
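Examples #6, #7, and #9-#11 use a project-local uti._linear(name, n_in, n_out, inputs, initialization=...) whose signature differs from TF's internal _linear. The real uti module is not shown; a plausible sketch, assuming 'he' selects He-normal weight initialization:

import numpy as np
import tensorflow as tf

def _linear(name, n_in, n_out, inputs, initialization=None):
    # fully connected layer: [batch, n_in] -> [batch, n_out]
    with tf.variable_scope(name):
        if initialization == 'he':
            init = tf.random_normal_initializer(stddev=np.sqrt(2.0 / n_in))
        else:
            init = tf.glorot_uniform_initializer()
        weights = tf.get_variable('W', [n_in, n_out], initializer=init)
        biases = tf.get_variable('b', [n_out],
                                 initializer=tf.zeros_initializer())
        return tf.matmul(inputs, weights) + biases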
Example #7
    def generator(self):

        self.lstm_X = tf.placeholder(
            tf.float32, [None, self.lstm_time_steps, self.lstm_input_size],
            name='lstm_X')

        # flatten to 2-D for the input projection
        l_in_x = tf.reshape(self.lstm_X, [-1, self.lstm_input_size],
                            name='2_2D')
        l_in_y = uti._linear('Generator.LSTM_in_layer.Linear',
                             self.lstm_input_size, self.lstm_cell_size,
                             l_in_x,
                             initialization=self.linear_initialization)
        # l_in_y = uti.batch_norm(l_in_y)
        lstm_l_in_y = tf.reshape(
            l_in_y, [-1, self.lstm_time_steps, self.lstm_cell_size])

        with tf.variable_scope('Generator.LSTM_cell'):
            lstm_cell_outputs, lstm_cell_final_state = \
                self.add_lstm_cell(lstm_l_in_y)
            self.gen_lstm_cell_final_state = lstm_cell_final_state

        # flatten the cell outputs again for the output projection
        l_out_x = tf.reshape(lstm_cell_outputs, [-1, self.lstm_cell_size],
                             name='2_2D')
        lstm_pred = uti._linear('Generator.LSTM_out_layer.Linear',
                                self.lstm_cell_size, self.lstm_output_size,
                                l_out_x,
                                initialization=self.linear_initialization)
        # lstm_pred = uti.batch_norm(lstm_pred)
        lstm_pred = tf.nn.relu(lstm_pred)
        tf.add_to_collection('pred_network', lstm_pred)

        return lstm_pred
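Example #7 also depends on a helper self.add_lstm_cell that is not shown. A typical TF 1.x implementation would unroll a single LSTM cell with dynamic_rnn; a hypothetical sketch:

def add_lstm_cell(self, lstm_inputs):
    # lstm_inputs: [batch, time_steps, cell_size]
    cell = tf.nn.rnn_cell.BasicLSTMCell(self.lstm_cell_size,
                                        forget_bias=1.0,
                                        state_is_tuple=True)
    outputs, final_state = tf.nn.dynamic_rnn(cell, lstm_inputs,
                                             dtype=tf.float32)
    return outputs, final_state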
Example #8
    def call(self, inputs, state):
        ### your code here ###
        print(self)
        print(inputs)
        print(state)

        # _linear: one affine map over the concatenated [inputs, state]
        output = self._activation(
            _linear([inputs, state], self._num_units, True))

        #####################
        # the output and the next state are the same hidden vector
        return output, output
Example #9
    def discriminator(self, _input):

        output = self.ReLULayer('Discriminator.1', self.seq2seq_output_dim,
                                self.D_hidden_size, _input)
        output = self.ReLULayer('Discriminator.2', self.D_hidden_size,
                                self.D_hidden_size, output)

        output = self.ReLULayer('Discriminator.3', self.D_hidden_size,
                                self.D_hidden_size, output)

        output = uti._linear('Discriminator.4', self.D_hidden_size, 1, output)

        return output
Example #10
    def generator(self):

        self.encoder_input = tf.placeholder(tf.float32,
                                            shape=(None,
                                                   self.seq2seq_input_dim),
                                            name="encoder_input")
        _input = uti._linear('Generator.in_layer.Linear',
                             self.seq2seq_input_dim,
                             self.lstm_cell_size,
                             self.encoder_input,
                             initialization=self.linear_initialization)

        # the decoder input is just a copy of the encoder input
        # (the name "GO" mimics a start-of-sequence token)
        decoder_input = tf.zeros_like(self.encoder_input,
                                      dtype=tf.float32,
                                      name="GO") + self.encoder_input

        cells = []
        for i in range(self.lstm_layer_num):
            with tf.variable_scope('Generator.LSTM_{}'.format(i)):
                cells.append(
                    rnn_cell.DropoutWrapper(
                        cell=rnn_cell.BasicLSTMCell(self.lstm_cell_size,
                                                    forget_bias=1.0,
                                                    state_is_tuple=True),
                        input_keep_prob=1.0,
                        output_keep_prob=self.keep_prob))
        cell = rnn_cell.MultiRNNCell(cells)

        decoder_output, decoder_memory = tf.contrib.legacy_seq2seq.basic_rnn_seq2seq(
            [_input], [decoder_input], cell)

        pred = uti._linear('Generator.out_layer.Linear',
                           self.lstm_cell_size,
                           self.seq2seq_output_dim,
                           decoder_output[0],
                           initialization=self.linear_initialization)
        pred = tf.nn.relu(pred)
        tf.add_to_collection('pred_network', pred)
        return pred
Example #11
    def discriminator(self, _input):

        output = self.ReLULayer('Discriminator.1', 1, self.D_hidden_size,
                                _input)
        output = self.ReLULayer('Discriminator.2', self.D_hidden_size,
                                self.D_hidden_size, output)
        output = self.ReLULayer('Discriminator.3', self.D_hidden_size,
                                self.D_hidden_size, output)
        output = uti._linear('Discriminator.4', self.D_hidden_size, 1, output)
        
        return output