def G(z):
    '''
    Generator

    Recurrent neural network with input z (noise vector). z contains the whole input sequence and will be split into
    step_size (global variable) parts that are then processed by the network sequentially.

    :param tf.Tensor z:
        Tensor containing the generator's input sequence.
    :return:
        Generator's output vector representing MNIST-like data.

    '''
    # split input into step_size parts, which form the actual input sequence
    z_ = tf.split(z, step_size, 1)

    # first RNN layer
    with tf.variable_scope("G_hidden"):
        cell_hidden = rnn.BasicRNNCell(noise_size)
        (h, _) = rnn.static_rnn(cell_hidden, z_, dtype=tf.float32)
        h = list(map(lambda t: tf.nn.relu(tf.matmul(t, G_W['hidden']) + G_b['hidden']), h))

    # RNN output layer
    with tf.variable_scope("G_out"):
        cell_out = rnn.BasicRNNCell(392)
        (h, _) = rnn.static_rnn(cell_out, h, dtype=tf.float32)
        logits = tf.matmul(h[-1], G_W['out']) + G_b['out']

    return tf.sigmoid(logits)
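G relies on several module-level names that are not shown here: step_size, noise_size and the weight dictionaries G_W / G_b. The sketch below shows how those globals and a call site might look; the concrete sizes (chunk width, 784-dimensional MNIST output) are illustrative assumptions, not values from the original code.

import tensorflow as tf
from tensorflow.contrib import rnn

# Hypothetical globals assumed by G(); the exact values are illustrative.
step_size = 4        # number of RNN time steps the noise vector is split into
noise_size = 100     # width of the first RNN layer (and of each noise chunk here)
image_size = 784     # flattened 28x28 MNIST output

# Weight dictionaries referenced inside G(); shapes follow the matmuls above:
# the hidden projection maps noise_size -> noise_size, the output layer 392 -> 784.
G_W = {
    'hidden': tf.Variable(tf.truncated_normal([noise_size, noise_size], stddev=0.1)),
    'out': tf.Variable(tf.truncated_normal([392, image_size], stddev=0.1)),
}
G_b = {
    'hidden': tf.Variable(tf.zeros([noise_size])),
    'out': tf.Variable(tf.zeros([image_size])),
}

# z carries the whole noise sequence; tf.split(z, step_size, 1) inside G()
# turns it into step_size chunks of width noise_size.
z = tf.placeholder(tf.float32, [None, step_size * noise_size])
fake_images = G(z)   # shape: [batch_size, 784], values in (0, 1) after the sigmoid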
def G(z, y):
    '''
    Generator

    Recurrent neural network with input **z** (noise vector) and context **y**.
    **z** contains the whole input sequence and will be split into **step_size** (global variable) parts that are
    then processed by the network sequentially.

    :param tf.Tensor z:
        Tensor containing the generator's input sequence.
    :param tf.Tensor y:
        Context tensor. One-hot representation of the label.
    :return:
        Generator's output vector representing MNIST-like data conditioned on the context vector.
    '''
    # split input into step_size parts, which form the actual input sequence
    z_ = tf.split(z, step_size, 1)
    # concatenate the context vector to each part of the input sequence
    z_ = list(map(lambda t: tf.concat([t, y], 1), z_))

    # first RNN layer
    with tf.variable_scope("G_hidden"):
        cell_h1 = rnn.BasicRNNCell(noise_size + step_size * context_size)
        (h1, _) = rnn.static_rnn(cell_h1, z_, dtype=tf.float32)
        h1 = list(map(lambda t: tf.nn.relu(tf.matmul(t, G_W['h1']) + G_b['h1']), h1))

    # RNN output layer
    with tf.variable_scope("G_out"):
        cell_out = rnn.BasicRNNCell(392)
        (h, _) = rnn.static_rnn(cell_out, h1, dtype=tf.float32)
        logits = tf.matmul(h[-1], G_W['out']) + G_b['out']

    return tf.sigmoid(logits)
def D(x):
    '''
    Discriminator

    Recurrent neural network with input x (MNIST-like data). x contains the whole input sequence and will be split into
    step_size (global variable) parts that are then processed by the network sequentially.

    :param tf.Tensor x:
        Tensor containing the discriminator's input sequence. x represents MNIST-like data.
    :return:
        Tuple containing the discriminator's output before applying the output layer's activation function (logits) and
        after applying the activation function.
        The logits are needed for the loss function.
        The actual output scalar is a value between 0 and 1 classifying the input sequence as real (1) or fake (0).

    '''
    # split input into step_size parts, which form the actual input sequence
    x_ = tf.split(x, step_size, 1)

    # first RNN layer
    with tf.variable_scope("D_hidden"):
        cell_hidden = rnn.BasicRNNCell(784)
        (h, _) = rnn.static_rnn(cell_hidden, x_, dtype=tf.float32)
        h = list(map(lambda t: tf.nn.relu(tf.matmul(t, D_W['hidden']) + D_b['hidden']), h))

    # RNN output layer
    with tf.variable_scope("D_out"):
        cell_out = rnn.BasicRNNCell(392)
        (h, _) = rnn.static_rnn(cell_out, h, dtype=tf.float32)
        logits = tf.matmul(h[-1], D_W['out']) + D_b['out']

    return (logits, tf.sigmoid(logits))
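The tuple returned by D is exactly what a standard GAN loss needs: the logits go into tf.nn.sigmoid_cross_entropy_with_logits, while the sigmoid output is the readable real/fake score. Below is a minimal sketch of that wiring, reusing the globals and fake_images from the sketch after G above and assuming D_W / D_b dictionaries analogous to G_W / G_b; it is an illustrative standard GAN loss, not the original training code.

# Hypothetical weight dictionaries for D; shapes follow the matmuls above
# (hidden cell width 784, output cell width 392, a single real/fake logit).
D_W = {
    'hidden': tf.Variable(tf.truncated_normal([784, 784], stddev=0.1)),
    'out': tf.Variable(tf.truncated_normal([392, 1], stddev=0.1)),
}
D_b = {
    'hidden': tf.Variable(tf.zeros([784])),
    'out': tf.Variable(tf.zeros([1])),
}

x_real = tf.placeholder(tf.float32, [None, 784])
x_fake = fake_images   # generator output from the sketch above

d_logits_real, d_prob_real = D(x_real)
# The second call must reuse the variables created by the first one.
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    d_logits_fake, d_prob_fake = D(x_fake)

# The losses are built from the logits (numerically stable), not from the
# sigmoid outputs, which is why D returns both.
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))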
Example #4
    def _build_model(self,
                     batch_size,
                     helper_build_fn,
                     decoder_maxiters=None,
                     alignment_history=False):
        # embed input_data into a one-hot representation
        inputs = tf.one_hot(self.input_data,
                            self._input_size,
                            dtype=self._dtype)
        inputs_len = self.input_lengths

        with tf.name_scope('bidir-encoder'):
            fw_cell = rnn.MultiRNNCell(
                [rnn.BasicRNNCell(self._enc_rnn_size) for i in range(3)],
                state_is_tuple=True)
            bw_cell = rnn.MultiRNNCell(
                [rnn.BasicRNNCell(self._enc_rnn_size) for i in range(3)],
                state_is_tuple=True)
            fw_cell_zero = fw_cell.zero_state(batch_size, self._dtype)
            bw_cell_zero = bw_cell.zero_state(batch_size, self._dtype)

            enc_out, _ = tf.nn.bidirectional_dynamic_rnn(
                fw_cell,
                bw_cell,
                inputs,
                sequence_length=inputs_len,
                initial_state_fw=fw_cell_zero,
                initial_state_bw=bw_cell_zero)

        with tf.name_scope('attn-decoder'):
            dec_cell_in = rnn.GRUCell(self._dec_rnn_size)
            attn_values = tf.concat(enc_out, 2)
            attn_mech = seq2seq.BahdanauAttention(self._enc_rnn_size * 2,
                                                  attn_values, inputs_len)
            dec_cell_attn = rnn.GRUCell(self._enc_rnn_size * 2)
            dec_cell_attn = seq2seq.AttentionWrapper(
                dec_cell_attn,
                attn_mech,
                self._enc_rnn_size * 2,
                alignment_history=alignment_history)
            dec_cell_out = rnn.GRUCell(self._output_size)
            dec_cell = rnn.MultiRNNCell(
                [dec_cell_in, dec_cell_attn, dec_cell_out],
                state_is_tuple=True)

            dec = seq2seq.BasicDecoder(
                dec_cell, helper_build_fn(),
                dec_cell.zero_state(batch_size, self._dtype))

            dec_out, dec_state = seq2seq.dynamic_decode(
                dec,
                output_time_major=False,
                maximum_iterations=decoder_maxiters,
                impute_finished=True)

        self.outputs = dec_out.rnn_output
        self.output_ids = dec_out.sample_id
        self.final_state = dec_state
Example #5
    def rnn_cells(layers):
        print('-------------------------RNN CELLS---------------------------------------------', layers)
        if isinstance(layers[0], dict):
            return [rnn.DropoutWrapper(rnn.BasicRNNCell(layer['num_units']), layer['keep_prob'])
                    if layer.get('keep_prob')
                    else rnn.BasicRNNCell(layer['num_units'])
                    for layer in layers]

        return [rnn.BasicRNNCell(steps) for steps in layers]
Example #6
    def generate_source_target(self, x, scope=None):
        with tf.variable_scope(scope):
            weights_st = weight_variable([2 * self.hidden_rnn * time_steps, 2 * self.hidden_rnn])
            biases_st = bias_variable([2 * self.hidden_rnn])
            if cell_type == 'lstm':
                if num_layers > 1:
                    # define rnn-cell with tensor_flow
                    # forward direction cell
                    fw_cell_st = tf.contrib.rnn.MultiRNNCell([rnn.LSTMCell(self.hidden_rnn)
                                                              for _ in range(num_layers)])
                    # backward direction cell
                    bw_cell_st = tf.contrib.rnn.MultiRNNCell([rnn.LSTMCell(self.hidden_rnn)
                                                              for _ in range(num_layers)])
                else:
                    fw_cell_st = rnn.LSTMCell(self.hidden_rnn)
                    # backward direction cell
                    bw_cell_st = rnn.LSTMCell(self.hidden_rnn)
            elif cell_type == 'gru':
                if num_layers > 1:
                    fw_cell_st = tf.contrib.rnn.MultiRNNCell([rnn.GRUCell(self.hidden_rnn)
                                                              for _ in range(num_layers)])
                    # backward direction cell
                    bw_cell_st = tf.contrib.rnn.MultiRNNCell([rnn.GRUCell(self.hidden_rnn)
                                                              for _ in range(num_layers)])
                else:
                    fw_cell_st = rnn.GRUCell(self.hidden_rnn)
                    # backward direction cell
                    bw_cell_st = rnn.GRUCell(self.hidden_rnn)
            else:
                if num_layers > 1:
                    fw_cell_st = tf.contrib.rnn.MultiRNNCell([rnn.BasicRNNCell(self.hidden_rnn)
                                                              for _ in range(num_layers)])
                    # backward direction cell
                    bw_cell_st = tf.contrib.rnn.MultiRNNCell([rnn.BasicRNNCell(self.hidden_rnn)
                                                              for _ in range(num_layers)])
                else:
                    fw_cell_st = rnn.BasicRNNCell(self.hidden_rnn)
                    # backward direction cell
                    bw_cell_st = rnn.BasicRNNCell(self.hidden_rnn)

            # get rnn-cell outputs
            l_outputs_st, a_st, b_st = rnn.static_bidirectional_rnn(fw_cell_st, bw_cell_st,
                                                                    x, dtype=tf.float32)
            l_outputs_st = tf.transpose(tf.stack(l_outputs_st, axis=0), perm=[1, 0, 2])

            l_outputs_st = tf.reshape(l_outputs_st, [-1, 2 * self.hidden_rnn * time_steps])

            outputs_st = tf.nn.tanh(tf.matmul(l_outputs_st, weights_st) + biases_st)
            lo_gits_st = tf.reshape(outputs_st, [-1, 2 * self.hidden_rnn])

            ab_st = tf.concat((a_st[1], b_st[1]), axis=1)

            return lo_gits_st, ab_st
Example #7
 def get_cell(model_name):
     if model_name == "lstm":
         return rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
     elif model_name == "gru":
         return rnn.GRUCell(n_hidden)
     else:
         return rnn.BasicRNNCell(n_hidden)
Example #8
 def __init__(self, num_units, tied=False, non_recurrent_fn=None):
   super(Grid2BasicRNNCell, self).__init__(
       num_units=num_units, num_dims=2,
       input_dims=0, output_dims=0, priority_dims=0, tied=tied,
       non_recurrent_dims=None if non_recurrent_fn is None else 0,
       cell_fn=lambda n, i: rnn.BasicRNNCell(num_units=n, input_size=i),
       non_recurrent_fn=non_recurrent_fn)
Example #9
    def test_stacked_rnn(self):
        cell_fns = [
            lambda size: rnn.BasicRNNCell(size),
            lambda size: rnn.BasicLSTMCell(size),
            lambda size: rnn.LSTMCell(size),
            lambda size: rnn.GRUCell(size),
        ]

        for cell_fn in cell_fns:
            ops.reset_default_graph()
            dtype = dtypes.float32
            inputs_ph = array_ops.placeholder(dtype, [None, None, 1])

            scope = 'stacked_rnn'
            outputs, states, initial_state_phs, zero_states = stacked_rnn_impl.stacked_rnn(
                inputs_ph, [2, 4], cell_fn, scope)

            batch_size = 1
            num_iters = 4
            with session.Session() as sess:
                sess.run(variables.global_variables_initializer())

                initial_states = sess.run(zero_states(batch_size, dtype))
                for _ in range(num_iters):
                    input_seq = np.zeros((batch_size, 1, 1))
                    output_seq, initial_states = sess.run(
                        (outputs, states),
                        feed_dict={
                            inputs_ph: input_seq,
                            **{
                                k: v
                                for k, v in zip(initial_state_phs, initial_states)
                            },
                        })
Example #10
    def __init__(self,
                 rnn_hidden_units,
                 batch_size=128,
                 time_step=28,
                 n_input=28,
                 n_class_num=10):
        self._rnn_hidden_units = rnn_hidden_units
        self._batch_size = batch_size
        self._time_step = time_step
        self._n_input = n_input
        self._n_class_num = n_class_num

        self._x = tf.placeholder(
            tf.float32,
            shape=[self._batch_size, self._time_step, self._n_input])
        self._y = tf.placeholder(tf.int32, shape=[self._batch_size])
        # with tf.variable_scope("rnn"):
        self._cell = rnn.BasicRNNCell(num_units=self._rnn_hidden_units)
        self._stack_cells = rnn.MultiRNNCell([self._cell] * 3)
        self._W = tf.Variable(initial_value=tf.truncated_normal(
            shape=[self._rnn_hidden_units, self._n_class_num]))
        self._biases = tf.Variable(tf.zeros(shape=[self._n_class_num]))

        self._logits = self.inference
        self._y_pred = self.predict
        self._accuracy = self.accuracy
Example #11
 def RNN(x, model = model_arg, capacity = cap_arg, FFT = FFT_arg, comp = comp_arg):
     
     #Choose cell and assign output and state   
     if model == "LSTM":
         cell = rnn.BasicLSTMCell(n_hidden, state_is_tuple=True, forget_bias=1)
         outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
     elif model == "RNN":
         cell = rnn.BasicRNNCell(n_hidden)
         outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
     elif model == "EURNN":
         cell = EURNNCell(n_hidden, capacity, FFT, comp)
         if comp:
             comp_outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.complex64)
             outputs = tf.real(comp_outputs)
         else:
             outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
 
     with tf.variable_scope("params", reuse = False):
         weights = tf.get_variable("weights", shape = [n_hidden, n_classes], \
                     dtype=tf.float32, initializer=tf.random_uniform_initializer(1, 2))
         
         biases = tf.get_variable("biases", shape=[n_classes], \
                  dtype=tf.float32, initializer=tf.constant_initializer(1) )
         
     output_list = tf.unstack(outputs, axis=1)
     last_out = output_list[-1]
     weight_prod = tf.matmul(last_out, weights)
     return tf.nn.bias_add(weight_prod, biases)
Example #12
    def policy_network(self,
                       mlp_input,
                       output_size,
                       scope,
                       size=config.baseline_layer_size,
                       n_layers=config.n_layers,
                       output_activation=None):

        if config.env_name == "Fourrooms-v1":
            state_embedding = tf.tile(
                tf.one_hot(indices=tf.cast(mlp_input, dtype=tf.int32),
                           depth=self.env.nS), [1, 1, 1])
            rnn_cell = rnn.BasicLSTMCell(num_units=self.env.action_space.n)

        else:
            state_embedding = tf.tile(tf.expand_dims(mlp_input, axis=1),
                                      [1, 1, 1])
            rnn_cell = rnn.BasicRNNCell(
                num_units=self.env.action_space.shape[0])

        sub_policies, states = tf.nn.dynamic_rnn(cell=rnn_cell,
                                                 inputs=state_embedding,
                                                 dtype=tf.float32,
                                                 scope='subpolicy')

        return tf.squeeze(sub_policies, axis=1)
Example #13
 def rnn(self):
     x_input = tf.unstack(self.x, self.timesteps, 1)
     rnn_cell = rnn.BasicRNNCell(self.num_hidden_units)
     states_series, current_state = rnn.static_rnn(rnn_cell,
                                                   x_input,
                                                   dtype=tf.float32)
     return tf.matmul(current_state, self.w) + self.b
Example #14
    def tensorflow_graph(self):
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
        g = tf.Graph()

        g_W = list()
        g_w0 = list()

        with g.as_default():
            g_x = tf.placeholder("float", [None, self.n_inputs])

            g_v = tf.reshape(g_x, [-1, self.n_steps, self.n_inputs//self.n_steps])

            # Prepare data shape to match `rnn` function requirements
            # Current data input shape: (batch_size, n_steps, n_input)
            # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

            # Permuting batch_size and n_steps
            g_v = tf.transpose(g_v, [1, 0, 2])
            # Reshaping to (n_steps*batch_size, n_input)
            g_v = tf.reshape(g_v, [-1, self.n_inputs//self.n_steps])
            # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
            g_v = tf.split(g_v, self.n_steps, 0)


            # Define an RNN cell with tensorflow
            rnn_cell = rnn.BasicRNNCell(self.n_hidden)

            # Get RNN cell output
            g_v, states = rnn.static_rnn(rnn_cell, g_v, dtype=tf.float32)

            # Add the RNN cell's variables (kernel and bias) to the list of tensorflow references
            [W, w0] = tf.global_variables()
            g_W.append(W)
            g_w0.append(w0)

            # Create variables for the hidden-to-output layer mapping
            W = tf.Variable(self.W[-1], name='W_out', dtype='float32')
            w0 = tf.Variable(self.w0[-1], name='w0_out', dtype='float32')
            g_W.append(W)
            g_w0.append(w0)

            # Activation, using rnn inner loop last output
            g_v = tf.matmul(g_v[-1], g_W[-1]) + g_w0[-1]

            actFunc = self.g[-1]
            if actFunc == 'relu':
                g_y = tf.nn.relu(g_v)
            elif actFunc == 'tanh':
                g_y = tf.nn.tanh(g_v)
            elif actFunc == 'sigmoid':
                g_y = tf.nn.sigmoid(g_v)
            elif actFunc == 'softmax':
                g_y = tf.nn.softmax(g_v)
            elif actFunc == 'lin':
                g_y = g_v
            else:
                print("Error! Unknown activation function")
                sys.exit(-1)

        return g, g_y, g_v, g_x, g_W, g_w0
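The transpose / reshape / split sequence above is the standard way to turn a (batch_size, n_steps, n_input) tensor into the list of per-step tensors that rnn.static_rnn expects; tf.unstack along the time axis (used by several other examples on this page) produces the same list in one call. A small standalone check with illustrative sizes:

import tensorflow as tf

batch_size, n_steps, n_input = 4, 7, 3
v = tf.zeros([batch_size, n_steps, n_input])

# Same transformation as above: move time to the front, flatten, then split.
v_t = tf.transpose(v, [1, 0, 2])           # (n_steps, batch_size, n_input)
v_t = tf.reshape(v_t, [-1, n_input])       # (n_steps * batch_size, n_input)
steps = tf.split(v_t, n_steps, 0)          # n_steps tensors of (batch_size, n_input)

# Equivalent one-liner along the time axis.
steps_alt = tf.unstack(v, n_steps, 1)

print(len(steps), steps[0].shape)          # 7 tensors of shape (4, 3)
print(len(steps_alt), steps_alt[0].shape)  # 7 tensors of shape (4, 3)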
Example #15
File: rnn.py  Project: chamathpali/FedSim
    def create_model(self, optimizer):
        features = tf.placeholder(tf.int32, [None, self.seq_len],
                                  name='features')
        labels = tf.placeholder(tf.int64, [
            None,
        ], name='labels')

        embs = tf.Variable(self.emb_arr, dtype=tf.float32, trainable=False)
        x = tf.nn.embedding_lookup(embs, features)

        basic_cell = rnn.BasicRNNCell(num_units=self.n_hidden)
        outputs, _ = tf.nn.dynamic_rnn(basic_cell, x, dtype=tf.float32)

        pred = tf.squeeze(tf.layers.dense(inputs=outputs[:, -1, :], units=1))

        loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels,
                                               logits=pred)
        optimizer = tf.train.AdamOptimizer(
            learning_rate=0.0001)  #comment this for FedProx
        grads_and_vars = optimizer.compute_gradients(loss)
        grads, _ = zip(*grads_and_vars)
        train_op = optimizer.apply_gradients(
            grads_and_vars, global_step=tf.train.get_global_step())

        correct_pred = tf.equal(tf.to_int64(tf.greater(pred, 0)), labels)
        eval_metric_ops = tf.count_nonzero(correct_pred)

        return features, labels, train_op, grads, eval_metric_ops, loss
Example #16
 def cell_create(self, scope_name):
     with tf.variable_scope(scope_name):
         if self.cell_type == 'tanh':
             cells = rnn.MultiRNNCell([
                 rnn.BasicRNNCell(self.n_hidden[i])
                 for i in range(self.n_layers)
             ],
                                      state_is_tuple=True)
         elif self.cell_type == 'LSTM':
             cells = rnn.MultiRNNCell([
                 rnn.BasicLSTMCell(self.n_hidden[i])
                 for i in range(self.n_layers)
             ],
                                      state_is_tuple=True)
         elif self.cell_type == 'GRU':
             cells = rnn.MultiRNNCell([
                 rnn.GRUCell(self.n_hidden[i]) for i in range(self.n_layers)
             ],
                                      state_is_tuple=True)
         elif self.cell_type == 'LSTMP':
             cells = rnn.MultiRNNCell([
                 rnn.LSTMCell(self.n_hidden[i])
                 for i in range(self.n_layers)
             ],
                                      state_is_tuple=True)
         cells = rnn.DropoutWrapper(cells,
                                    input_keep_prob=self.dropout_ph,
                                    output_keep_prob=self.dropout_ph)
     return cells
Example #17
def main():
    """
    Run this command to generate the pb file
    1. mkdir model
    2. python rnn.py
    """
    dir = argv[1]
    n_steps = 2
    n_input = 10
    n_hidden = 20
    n_output = 5
    xs = tf.Variable(tf.random_uniform([4, n_steps, n_input]) + 10, name='input', dtype=tf.float32)
    weight = tf.Variable(tf.random_uniform([n_hidden, n_output]) + 10, name="weight", dtype=tf.float32)
    bias = tf.Variable(tf.random_uniform([n_output]) + 10, name="bias", dtype=tf.float32)
    x = tf.unstack(xs, n_steps, 1)
    cell = rnn.BasicRNNCell(n_hidden)
    output, states = rnn.static_rnn(cell, x, dtype=tf.float32)
    final = tf.nn.bias_add(tf.matmul(output[-1], weight), bias, name='output')
    output = tf.Variable(tf.random_uniform(tf.shape(final)),name='output_result')
    result = tf.assign(output, final)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        sess.run(result)
        checkpointpath = saver.save(sess, dir + '/model.chkp')
        tf.train.write_graph(sess.graph, dir, 'model.pbtxt')

    input_graph = dir + "/model.pbtxt"
    input_checkpoint = dir + "/model.chkp"
    output_node_names= ["output", "output_result"]
    output_graph = dir + "/model.pb"

    merge_checkpoint(input_graph, input_checkpoint, output_node_names, output_graph)
def dynamic_rnn():
    n_inputs = 3
    n_neurons = 5

    X = tf.placeholder(tf.float32, [None, None, n_inputs])
    seq_length = tf.placeholder(tf.int32, [None])
    basic_cell = rnn.BasicRNNCell(num_units=n_neurons)
    outputs, states = tf.nn.dynamic_rnn(basic_cell,
                                        X,
                                        dtype=tf.float32,
                                        sequence_length=seq_length)
    init = tf.global_variables_initializer()
    file_writer = tf.summary.FileWriter('14_tf_logs', tf.get_default_graph())
    file_writer.close()

    X_batch = np.array([
        [[0, 1, 2], [9, 8, 7]],  # instance 1
        [[3, 4, 5], [0, 0, 0]],  # instance 2
        [[3, 4, 5], [3, 6, 1]],  # instance 2
    ])
    seq_length_batch = np.array([2, 1, 2])
    with tf.Session() as sess:
        init.run()
        outputs_val, states_val = sess.run([outputs, states],
                                           feed_dict={
                                               X: X_batch,
                                               seq_length: seq_length_batch
                                           })

    print('Outputs:')
    print(outputs_val)
    print('States:')
    print(states_val)
Example #19
        def Demo_Encoder(s_h, seq_lengths, scope='Demo_Encoder', reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                state_features = tf.reshape(
                    State_Encoder(tf.reshape(s_h, [-1, self.h, self.w, depth]),
                                  self.batch_size * max_demo_len, reuse=reuse),
                    [self.batch_size, max_demo_len, -1])

                with tf.variable_scope('cell_{}'.format(i), reuse=reuse):
                    if self.encoder_rnn_type == 'lstm':
                        cell = rnn.BasicLSTMCell(
                            num_units=self.num_lstm_cell_units,
                            state_is_tuple=True)
                    elif self.encoder_rnn_type == 'rnn':
                        cell = rnn.BasicRNNCell(num_units=self.num_lstm_cell_units)
                    elif self.encoder_rnn_type == 'gru':
                        cell = rnn.GRUCell(num_units=self.num_lstm_cell_units)
                    else:
                        raise ValueError('Unknown encoder rnn type')

                new_h, cell_state = tf.nn.dynamic_rnn(
                    cell=cell, dtype=tf.float32, sequence_length=seq_lengths,
                    inputs=state_features)
                all_states = new_h
                return all_states, cell_state.h, cell_state.c
Example #20
def RNN(x, weights, biases):
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(x, n_input, 1)
    # Setting the RNN mode to use the basic RNN only without using the LSTM
    rnn_cell = rnn.BasicRNNCell(n_hidden)
    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Example #21
 def set_cell_type(self, c_type, n_hid):
     # choosing rnn type
     if c_type == "gru":
         return rnn.BasicLSTMCell(n_hid)
     elif c_type == "lstm":
         return rnn.GRUCell(n_hid)
     elif c_type == "rnn":
         return rnn.BasicRNNCell(n_hid)
Example #22
    def RNN(features):

        features = tf.unstack(features, noFrames, 0)

        rnn_cell = rnn.BasicRNNCell(embeddingSize, activation=tf.nn.tanh, reuse=tf.AUTO_REUSE)

        outputs, states = rnn.static_rnn(rnn_cell, features, dtype=tf.float32)

        return outputs
Example #23
 def __init__(self, num_units):
     super(Grid1BasicRNNCell, self).__init__(
         num_units=num_units,
         num_dims=1,
         input_dims=0,
         output_dims=0,
         priority_dims=0,
         tied=False,
         cell_fn=lambda n, i: rnn.BasicRNNCell(num_units=n, input_size=i))
Example #24
def rnn_model(input,
              num_outputs,
              scope,
              reuse=False,
              num_units=128,
              length=10):
    # This is the Recurrent Neural Network for the attention unit. Binary classifier.
    with tf.variable_scope(scope, reuse=reuse):
        out = input
        cell = rnn.BasicRNNCell(num_units=num_units, activation="tanh")
Example #25
    def policy_network(self,
                       mlp_input,
                       output_size,
                       scope,
                       size=config.baseline_layer_size,
                       n_layers=config.n_layers,
                       output_activation=None):

        if str(config.env_name).startswith("Fourrooms"):

            self.state_embedding = tf.tile(
                tf.one_hot(indices=tf.cast(mlp_input, dtype=tf.int32),
                           depth=self.env.nS), [1, config.num_sub_policies, 1])
            num_actions = self.env.action_space.n

        else:
            self.state_embedding = tf.tile(tf.expand_dims(mlp_input, axis=1),
                                           [1, config.num_sub_policies, 1])
            num_actions = self.env.action_space.shape[0]

        rnn_cell = rnn.BasicRNNCell(num_units=num_actions)

        self.sub_policies, states = tf.nn.dynamic_rnn(
            cell=rnn_cell,
            inputs=self.state_embedding,
            dtype=tf.float32,
            scope='subpolicy')

        lstm_cell = rnn.BasicLSTMCell(num_units=config.num_sub_policies)

        concatenated = tf.concat([self.sub_policies, self.state_embedding],
                                 axis=2)

        if config.freeze_sub_policy:
            concatenated = tf.stop_gradient(concatenated, name='stop')

        self.out, states = tf.nn.dynamic_rnn(cell=lstm_cell,
                                             inputs=concatenated,
                                             dtype=tf.float32,
                                             scope='master')
        last_output = self.out[:, -1, :]

        self.chosen_index = tf.argmax(last_output, axis=1)
        # self.weights = tf.nn.softmax(logits=last_output, dim=
        self.weights = tf.one_hot(indices=self.chosen_index,
                                  depth=config.num_sub_policies)

        final_policy = tf.reduce_sum(tf.expand_dims(self.weights, axis=2) *
                                     self.sub_policies,
                                     axis=1)

        if config.sub_policy_index > -1:
            final_policy = self.sub_policies[:, config.sub_policy_index, :]

        return final_policy
Example #26
def build_cell(cell_type, hidden_units, initializer=None, num_layers=1):
	if num_layers > 1:
		cell = rnn.MultiRNNCell([build_cell(cell_type, hidden_units, initializer, 1) for _ in range(num_layers)])
	else:
		if cell_type == "tf_lstm":
			cell = rnn.LSTMCell(hidden_units, initializer=initializer)
		elif cell_type == "tf_gru":
			cell = rnn.GRUCell(hidden_units)
		else:
			cell = rnn.BasicRNNCell(hidden_units)
	return cell
Example #27
 def __init__(self, num_units, state_is_tuple=True, output_is_tuple=True):
     super(Grid1BasicRNNCell,
           self).__init__(num_units=num_units,
                          num_dims=1,
                          input_dims=0,
                          output_dims=0,
                          priority_dims=0,
                          tied=False,
                          cell_fn=lambda n: rnn.BasicRNNCell(num_units=n),
                          state_is_tuple=state_is_tuple,
                          output_is_tuple=output_is_tuple)
Example #28
 def call_cell(cell_type, nun_hid, dropout):
     cell = None
     # choosing rnn type
     if cell_type == "gru":
         cell = rnn.GRUCell(nun_hid)
     elif cell_type == "lstm":
         cell = CustomLSTMCell(nun_hid, forget_bias=1.0)
     elif cell_type == "rnn":
         cell = rnn.BasicRNNCell(nun_hid)
     # wrapping in dropout
     return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=1.0 - dropout)
Example #29
def RNN(x, weight, bias):
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)
    cell = rnn.BasicRNNCell(num_hidden)
    cell = rnn_cell.DropoutWrapper(cell, output_keep_prob=0.5)
    # cell = rnn_cell.MultiRNNCell([cell] * 3)
    outputs, states = rnn.static_rnn(cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weight) + bias
Example #30
    def build(self, sk_tf):
        x = tf.transpose(sk_tf, [1, 0, 2])  # (n_steps, ds, n_input)
        x = tf.reshape(x, [-1, self.n_input])  # (n_steps * ds, n_input)
        x = tf.split(x, self.n_steps,
                     axis=0)  # an array with n_steps tensors: (ds, n_input)
        # x = tf.split(0, n_steps, x) # an array with n_steps tensors: (ds, n_input) # for 0.12.1

        rnn_fw_cell = rnn.BasicRNNCell(self.n_hidden)
        rnn_bw_cell = rnn.BasicRNNCell(self.n_hidden)
        output_tf_list, final_fw_tf, final_bk_tf = \
            rnn.static_bidirectional_rnn(rnn_fw_cell, rnn_bw_cell, x, dtype=tf.float32)

        # rnn_fw_cell = rnn_cell.BasicRNNCell(n_hidden)
        # rnn_bw_cell = rnn_cell.BasicRNNCell(n_hidden)
        # output_tf_list, final_fw_tf, final_bk_tf = \
        #     rnn.bidirectional_rnn(rnn_fw_cell, rnn_bw_cell, x, dtype=tf.float32) # for 0.12.1

        # rnn_output contains 3 elements:
        # 1. a list of n_steps tensors, each one has the shape (ds, 2 * n_hidden)
        # 2. final state of forward cell (ds, n_hidden)
        # 3. final state of backward cell (ds, n_hidden)

        # output_tf_list: a list of n_steps tensors, each one: (ds, 2 * n_hidden)
        output_tf = tf.stack(output_tf_list, axis=0)
        # output_tf: (n_steps, ds, 2 * n_hidden)
        sk_rnn_state_tf = tf.transpose(output_tf, [1, 0, 2])
        # (ds, n_step, 2 * n_hidden): get the hidden vector at each state

        if self.combine == 'FwBk':
            # rnn_output_tf: (ds, 2 * n_hidden)
            rnn_output_tf = tf.concat(values=[final_fw_tf, final_bk_tf],
                                      axis=1)
            # rnn_output_tf = tf.concat(concat_dim=1, values=[final_fw_tf, final_bk_tf]) # for 0.12.1

        LogInfo.logs('sk_rnn_state_tf compiled. %s',
                     sk_rnn_state_tf.get_shape().as_list())
        LogInfo.logs('rnn_output_tf compiled. %s',
                     rnn_output_tf.get_shape().as_list())
        LogInfo.logs('* SkeletonRNN built.')
        return sk_rnn_state_tf, rnn_output_tf
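The comment block above summarises the three values returned by rnn.static_bidirectional_rnn. A minimal standalone shape check with illustrative sizes (ds is the batch dimension, as in the comments):

import tensorflow as tf
from tensorflow.contrib import rnn

ds, n_steps, n_input, n_hidden = 2, 5, 3, 4
x = [tf.zeros([ds, n_input]) for _ in range(n_steps)]   # list of per-step inputs

fw_cell = rnn.BasicRNNCell(n_hidden)
bw_cell = rnn.BasicRNNCell(n_hidden)
outputs, final_fw, final_bw = rnn.static_bidirectional_rnn(
    fw_cell, bw_cell, x, dtype=tf.float32)

print(len(outputs), outputs[0].shape)   # n_steps tensors of shape (ds, 2 * n_hidden)
print(final_fw.shape, final_bw.shape)   # (ds, n_hidden) and (ds, n_hidden)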