def recurrent_neural_network(x):
    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size, state_is_tuple=True)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    return output
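For context, a minimal usage sketch for the snippet above; the hyperparameter values, placeholder shapes, and the training step are assumptions (MNIST-style 28x28 rows), not part of the original example:

import tensorflow as tf

rnn_size, n_classes, n_chunks, chunk_size = 128, 10, 28, 28  # assumed values

x = tf.placeholder(tf.float32, [None, n_chunks, chunk_size])
y = tf.placeholder(tf.float32, [None, n_classes])

logits = recurrent_neural_network(x)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
train_op = tf.train.AdamOptimizer().minimize(cost)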
Example #2
def recurrent_neural_network3(x, y):
    # x and y are placeholders

    #initialise params for linear layers
    lin_layer1 = {
        'weights1':
        tf.Variable(tf.truncated_normal([rnn_size, 100], stddev=0.1)),
        'bias1': tf.Variable(tf.truncated_normal([100]))
    }

    lin_layer2 = {
        'weights': tf.Variable(tf.truncated_normal([100, dim_y], stddev=0.1)),
        'bias': tf.Variable(tf.truncated_normal([dim_y]))
    }

    # reshape input data
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, strip_size])
    x = tf.split(0, n_strip, x)

    # initialise cell
    cell = rnn_cell.BasicLSTMCell(rnn_size, state_is_tuple=True)

    cell = tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=drop_prob)

    cell = tf.nn.rnn_cell.MultiRNNCell(
        [cell] * n_layer, state_is_tuple=True)  #stack  by num layers

    cell = tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=drop_prob)

    out, _ = rnn.rnn(cell, x, dtype=tf.float32)

    last_out = out[-1]  # only interested in last output of RNN (many to one)

    # linear layer 1
    lin_1 = tf.matmul(last_out, lin_layer1['weights1']) + lin_layer1['bias1']

    # batch normalisation
    bn_1 = batch_normaliz(lin_1)

    # ReLU
    hid_1 = tf.nn.relu(bn_1)

    # linear layer 2
    lin_2 = tf.matmul(hid_1, lin_layer2['weights']) + lin_layer2['bias']

    return lin_2  # == y_pred
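The `batch_normaliz` helper used above is not shown; a minimal stand-in sketch (an assumption about its behaviour, using batch statistics only, with no train/eval switch):

def batch_normaliz(inputs, epsilon=1e-5):
    # Hypothetical stand-in for the helper referenced above: normalise over the
    # batch dimension and apply a learnable scale and shift.
    dim = inputs.get_shape().as_list()[-1]
    mean, variance = tf.nn.moments(inputs, axes=[0])
    scale = tf.Variable(tf.ones([dim]))
    shift = tf.Variable(tf.zeros([dim]))
    return tf.nn.batch_normalization(inputs, mean, variance, shift, scale, epsilon)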
  def __init__(self, config=None, mode=None):
    self.config = config
    self.mode = mode

    self.reader = utils.DataReader(seq_len=config.seq_length, batch_size=config.batch_size, data_filename=config.data_filename)

    self.cell = rnn_cell.BasicLSTMCell(config.rnn_size, state_is_tuple=True)

    self.input_data = tf.placeholder(tf.int32, [None, config.input_length])
    self.targets = tf.placeholder(tf.int32, [None, 1])
    self.initial_state = self.cell.zero_state(tf.shape(self.targets)[0], tf.float32)

    with tf.variable_scope("input_embedding"):
      embedding = tf.get_variable("embedding", [config.vocab_size, config.rnn_size])
      inputs = tf.split(1, config.input_length, tf.nn.embedding_lookup(embedding, self.input_data))
      inputs = [tf.squeeze(input, [1]) for input in inputs]

    with tf.variable_scope("send_to_rnn"):
      state = self.initial_state
      output = None

      for i, input in enumerate(inputs):
        if i > 0:
          tf.get_variable_scope().reuse_variables()
        output, state = self.cell(input, state)

    with tf.variable_scope("softmax"):
      softmax_w = tf.get_variable("softmax_w", [config.rnn_size, config.vocab_size])
      softmax_b = tf.get_variable("softmax_b", [config.vocab_size])
      self.logits = tf.matmul(output, softmax_w) + softmax_b
      self.probs = tf.nn.softmax(self.logits)

    loss = seq2seq.sequence_loss_by_example([self.logits],
                                            [tf.reshape(self.targets, [-1])],
                                            [tf.ones([config.batch_size])],
                                            config.vocab_size)

    self.cost = tf.reduce_mean(loss)
    self.final_state = state

    # self.lr = tf.Variable(0.001, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                      config.grad_clip)
    optimizer = tf.train.AdamOptimizer()#self.lr)
    self.train_op = optimizer.apply_gradients(zip(grads, tvars))
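A hedged sketch of driving a model like this for one training step; the class name `CharPredictionModel`, the `config` object, and the random batches are placeholders for illustration only:

import numpy as np

model = CharPredictionModel(config=config, mode='train')  # hypothetical class name; config assumed built elsewhere
x_batch = np.random.randint(0, config.vocab_size,
                            size=(config.batch_size, config.input_length))
y_batch = np.random.randint(0, config.vocab_size, size=(config.batch_size, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, cost = sess.run([model.train_op, model.cost],
                       feed_dict={model.input_data: x_batch, model.targets: y_batch})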
    def __init__(self, hidden_size, keep_prob, max_iteration, max_pool,
                 batch_size):
        """
        Inputs:
          hidden_size: int. Hidden size of the RNN
          keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)
        """
        self.hidden_size = hidden_size
        self.keep_prob = keep_prob
        self.max_iteration = max_iteration
        self.max_pool = max_pool
        self.batch_size = batch_size

        self.rnn_cell_fw = rnn_cell.BasicLSTMCell(self.hidden_size,
                                                  forget_bias=1.0)
        self.rnn_cell_fw = DropoutWrapper(self.rnn_cell_fw,
                                          input_keep_prob=self.keep_prob)
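The constructor above ends after wrapping the forward cell; a hedged sketch of how such a cell might be consumed later (the method name, placeholder shapes, and the use of `tf.nn.dynamic_rnn` are assumptions, not part of the original class):

    def build_graph(self, inputs, input_lens):
        # Hypothetical continuation: unroll the wrapped forward cell over a
        # padded batch of shape (batch_size, max_time, feature_dim).
        outputs, final_state = tf.nn.dynamic_rnn(self.rnn_cell_fw, inputs,
                                                 sequence_length=input_lens,
                                                 dtype=tf.float32)
        return outputs, final_state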
Example #5
def rnn_model(x):
    rnn_layer = {
        'Weights': tf.Variable(tf.random_normal([rnn_layer_size,
                                                 num_classes])),
        'Biases': tf.Variable(tf.random_normal([num_classes]))
    }

    print("x's shape", x.shape)
    x = tf.transpose(x, [1, 0, 2])
    print("Shape after transpose:", x.shape)
    x = tf.reshape(x, [-1, chunk_size])
    print("Shape after reshape", x.shape)
    x = tf.split(x, num_of_chunk, 0)
    lstm_cell = rnn_cell.BasicLSTMCell(rnn_layer_size, state_is_tuple=True)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], rnn_layer['Weights']) + rnn_layer['Biases']
    return output
Example #6
def seq_predict_model(X, w, b, time_step_size, vector_size):
    # input X shape: [batch_size, time_step_size, vector_size]
    # transpose X to [time_step_size, batch_size, vector_size]
    X = tf.transpose(X, [1, 0, 2])
    # reshape X to [time_step_size * batch_size, vector_size]
    X = tf.reshape(X, [-1, vector_size])
    # split X, array[time_step_size], shape: [batch_size, vector_size]
    X = tf.split(X, time_step_size, 0)

    # LSTM model with state_size = 10
    cell = rnn_cell.BasicLSTMCell(num_units=10,
                                  forget_bias=1.0,
                                  state_is_tuple=True)
    outputs, _states = rnn.static_rnn(cell, X, dtype=tf.float32)

    # Linear activation
    return tf.matmul(outputs[-1], w) + b, cell.state_size
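A minimal usage sketch for `seq_predict_model`; the sizes and the variable initialisation are assumptions:

time_step_size, vector_size, out_dim = 10, 8, 1  # assumed sizes
X = tf.placeholder(tf.float32, [None, time_step_size, vector_size])
w = tf.Variable(tf.random_normal([10, out_dim]))  # 10 matches num_units of the cell above
b = tf.Variable(tf.random_normal([out_dim]))
y_pred, state_size = seq_predict_model(X, w, b, time_step_size, vector_size)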
Example #7
def RNN(x, weights, biases):
    # x now has a shape like this (batch_size,sequence_length,input_dim)

    x = tf.transpose(x, [1, 0, 2])
    # x now has a shape like (sequence_length,batch_size,input_dim)

    x = tf.reshape(x, [-1, input_dim])
    # x now has a shape like (sequence_length*batch_size,input_dim)

    x = tf.split(0, sequence_length, x)
    # x is now a list of sequence_length tensors, each of shape (batch_size, input_dim)

    lstm_cell = rnn_cell.BasicLSTMCell(hidden_dim, forget_bias=1.0)

    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

    return tf.matmul(outputs[-1], weights)
def RNN(x, weights, biases):
    # Prepare data shape to match `rnn` function requirements:
    # current data input shape (batch_size, n_steps, n_input) is reshaped into
    # a list of 'n_steps' tensors of shape (batch_size, n_input).
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(0, n_steps, x)
    #lstm cell
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    #get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, x, dtype = tf.float32)
    #

    return tf.matmul(outputs[-1], weights['out']) + biases['out']
            def RNN(x):
                x_list = []
                for i in range(n_sequence):
                    x_list.append(x)

                with tf.variable_scope('rnn'):
                    # Define a lstm cell with tensorflow
                    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden)

                    # Get lstm cell output
                    outputs, states = rnn.rnn(lstm_cell, x_list, dtype=tf.float32)

                    logits = []
                    for i in range(len(outputs)):
                        with tf.variable_scope('linear'+str(i)):
                            logits.append(linear(outputs[i], n_hidden, 2))
                    return logits
Example #10
    def recurrent_neural_network(self, x):
        layer = {
            'weights':
            tf.Variable(tf.random_normal([self.rnn_size, self.n_classes])),
            'biases':
            tf.Variable(tf.random_normal([self.n_classes]))
        }

        x = tf.transpose(x, [1, 0, 2])
        x = tf.reshape(x, [-1, self.chunk_size])
        x = tf.split(x, self.max_length, 0)

        lstm_cell = rnn_cell.BasicLSTMCell(self.rnn_size, reuse=tf.AUTO_REUSE)
        outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

        output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']
        return output
def recurrent_neural_network(x):
    """AdamOptimiser is used an optimiser 
        Weight's tensor is of rnn_size """
    layer = {
        'weight': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }
    # reshape the data so that it can be fed to the RNN cell (LSTM)
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)
    # LSTM Networks. Long Short Term Memory networks – usually just called “LSTMs” – are a special kind of RNN, capable of learning long-term dependencies
    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], layer['weight']) + layer['biases']

    return output
Example #12
def create_model(max_word_id, is_test=False):
    GO_VALUE = max_word_id + 1
    network = tflearn.input_data(shape=[None, max_seq_len + max_seq_len],
                                 dtype=tf.int32,
                                 name="XY")
    encoder_inputs = tf.slice(network, [0, 0], [-1, max_seq_len],
                              name="enc_in")
    encoder_inputs = tf.unpack(encoder_inputs, axis=1)
    decoder_inputs = tf.slice(network, [0, max_seq_len], [-1, max_seq_len],
                              name="dec_in")
    decoder_inputs = tf.unpack(decoder_inputs, axis=1)
    go_input = tf.mul(tf.ones_like(decoder_inputs[0], dtype=tf.int32),
                      GO_VALUE)
    decoder_inputs = [go_input] + decoder_inputs[:max_seq_len - 1]
    num_encoder_symbols = max_word_id + 1  # ids start from 0
    num_decoder_symbols = max_word_id + 2  # includes the GO symbol

    cell = rnn_cell.BasicLSTMCell(16 * max_seq_len, state_is_tuple=True)

    model_outputs, states = seq2seq.embedding_rnn_seq2seq(
        encoder_inputs,
        decoder_inputs,
        cell,
        num_encoder_symbols=num_encoder_symbols,
        num_decoder_symbols=num_decoder_symbols,
        embedding_size=max_word_id,
        feed_previous=is_test)

    network = tf.pack(model_outputs, axis=1)

    targetY = tf.placeholder(shape=[None, max_seq_len],
                             dtype=tf.int32,
                             name="Y")

    network = tflearn.regression(network,
                                 placeholder=targetY,
                                 optimizer='adam',
                                 learning_rate=learning_rate,
                                 loss=sequence_loss,
                                 metric=accuracy,
                                 name="Y")

    print "begin create DNN model"
    model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path=None)
    print "create DNN model finish"
    return model
Example #13
    def __init__(self, args, infer=False):
        self.args = args
        if infer:
            args.batch_size = 1
            args.seq_length = 1
        #

        cell = rnn_cell.BasicLSTMCell(args.state_size)  #
        self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)

        self.input_data = tf.placeholder(tf.int32,
                                         [args.batch_size, args.seq_length])
        self.targets = tf.placeholder(tf.int32,
                                      [args.batch_size, args.seq_length])
        self.initial_state = cell.zero_state(args.batch_size, tf.float32)

        with tf.variable_scope('rnnlm'):
            w = tf.get_variable('softmax_w', [args.rnn_size, args.vocab_size])
            b = tf.get_variable('softmax_b', [args.vocab_size])

            with tf.device('/cpu:0'):
                embedding = tf.get_variable('embedding',
                                            [args.vocab_size, args.rnn_size])
                inputs = tf.nn.embedding_lookup(embedding, self.input_data)

        outputs, last_state = tf.nn.dynamic_rnn(
            self.cell, inputs, initial_state=self.initial_state, scope='rnnlm')
        output = tf.reshape(outputs, [-1, args.rnn_size])

        self.logits = tf.matmul(output, w) + b
        self.probs = tf.nn.softmax(self.logits)
        targets = tf.reshape(self.targets, [-1])
        loss = seq2seq.sequence_loss_by_example(
            [self.logits], [targets],
            [tf.ones_like(targets, dtype=tf.float32)])
        self.cost = tf.reduce_mean(loss)

        self.last_state = last_state

        self.lr = tf.Variable(0.0, trainable=False)  #
        optimizer = tf.train.AdamOptimizer(self.lr)
        tvars = tf.trainable_variables()
        grads = tf.gradients(self.cost, tvars)
        grads, _ = tf.clip_by_global_norm(grads, args.grad_clip)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars))
Example #14
def recurrent_neural_network(x):

    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    # tf.transpose permutes the tensor's axes; here each image becomes a sequence of row-wise inputs
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(0, n_chunks, x)

    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    return output
Example #15
    def add_model(self, inputs):
        print "Loading basic lstm model.."

        for i in range(self.config.rnn_numLayers):
            with tf.variable_scope('rnnLayer' + str(i)):
                lstm_cell = rnn_cell.BasicLSTMCell(self.config.hidden_size)
                outputs, _ = rnn.rnn(lstm_cell,
                                     inputs,
                                     dtype=tf.float32,
                                     sequence_length=self.seqLen_placeholder)
                inputs = outputs
        final_state = tf.add_n(outputs)

        logits = tf.contrib.layers.fully_connected(final_state,
                                                   self.config.class_num,
                                                   activation_fn=None)

        return logits
Example #16
def recurrent_neural_network(input):
    # input_data * weights + biases
    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    input = tf.transpose(input, [1, 0, 2])
    input = tf.reshape(input, [-1, chunk_size])
    input = tf.split(0, n_chunks, input)  # split into n_chunks time-step tensors

    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size)
    outputs, states = rnn.rnn(lstm_cell, input, dtype=tf.float32)

    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    saver = tf.train.Saver()
    return output, saver
Example #17
def RNN(X, weights, biases):

    # Prepare data shape to match 'rnn' function requirements
    # Current data input shape: (batch_size , n_steps, n_input)
    # Required shape : 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    X = tf.transpose(X, [1, 0, 2])  # (n_steps, batch_size, n_input)
    # Reshaping to (n_steps*batch_size , n_input)
    X = tf.reshape(X, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    X = tf.split(0, n_steps, X)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, X, dtype=tf.float32)
def create_cell(hidden_size, num_layers):
    single_cell = rnn_cell.GRUCell(hidden_size)
    if use_lstm:
        print("Using LSTM")
        single_cell = rnn_cell.BasicLSTMCell(hidden_size,
                                             state_is_tuple=True)
        #single_cell = rnn_cell.BasicLSTMCell(hidden_size)
    if not forward_only:
        # always use dropout; set keep_prob=1 if not dropout
        print("Training mode; dropout used!")
        single_cell = rnn_cell.DropoutWrapper(
            single_cell, output_keep_prob=output_keep_prob)
    cell = single_cell
    if num_layers > 1:
        cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers,
                                           state_is_tuple=True)
        #cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
    return cell
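Note that `[single_cell] * num_layers` stacks the same cell object in every layer; in later TF 1.x releases this can raise a variable-reuse error. A sketch that builds a fresh cell per layer instead (an adaptation relying on the same assumed globals `use_lstm`, `forward_only`, `output_keep_prob`):

def create_cell_per_layer(hidden_size, num_layers):
    # Same logic as create_cell above, but constructs a separate cell per layer.
    def one_cell():
        c = (rnn_cell.BasicLSTMCell(hidden_size, state_is_tuple=True)
             if use_lstm else rnn_cell.GRUCell(hidden_size))
        if not forward_only:
            c = rnn_cell.DropoutWrapper(c, output_keep_prob=output_keep_prob)
        return c
    if num_layers > 1:
        return tf.nn.rnn_cell.MultiRNNCell([one_cell() for _ in range(num_layers)],
                                           state_is_tuple=True)
    return one_cell()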
def rnn_model(x):
    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    x = tf.transpose(x, [1, 0, 2])  # swap batch and time axes: (batch, n_chunks, chunk_size) -> (n_chunks, batch, chunk_size)
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size,
                                       state_is_tuple=True)  # cell state returned as a (c, h) tuple
    # init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    return output
Example #20
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    # lstm cell
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias = 1.0)

    # Get lstm output
    output, states = rnn.rnn(lstm_cell, x, dtype = tf.float32)

    # Apply the linear projection to the output of every time step
    return [tf.matmul(output_x, weights) + biases for output_x in output]
Example #21
def dynamic_lstm_model_fn(batch_size, state_size, max_steps):
    # We make inputs and sequence_length constant so that multiple session.run
    # calls produce the same result.
    inputs = constant_op.constant(np.random.rand(batch_size, max_steps,
                                                 state_size),
                                  dtype=dtypes.float32)
    sequence_length = constant_op.constant(
        np.random.randint(0, max_steps + 1, size=[batch_size]),
        dtype=dtypes.int32)

    cell = rnn_cell.BasicLSTMCell(state_size)
    initial_state = cell.zero_state(batch_size, dtypes.float32)
    return inputs, rnn.dynamic_rnn(cell,
                                   inputs,
                                   sequence_length=sequence_length,
                                   initial_state=initial_state)
Example #22
def RNN(x, weights, biases, n_input, n_steps, n_hidden):
    print '------------we are in RNN NOW! ------------'
    # print 'x:', x
    # print 'weights:', weights
    # print 'biases:', biases
    # print 'n_input:', n_input
    # print 'n_steps:', n_steps
    # print 'n_hidden:', n_hidden

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps ---- Permutes the dimensions according to perm.
    # tf.transpose(a, perm=None, name='transpose')
    # a: A Tensor.
    # perm: A permutation of the dimensions of a.
    # name: A name for the operation (optional).
    x = tf.transpose(x, [1, 0, 2])

    # Reshaping to (n_steps*batch_size, n_input)
    # Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.
    # tf.reshape(tensor, shape, name=None)
    # tensor: A Tensor
    # shape: A Tensor of type int32. Defines the shape of the output tensor. -1 is inferred to be n_input:
    x = tf.reshape(x, [-1, n_input])

    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    # Splits a tensor into num_split tensors along one dimension.
    # tf.split(split_dim, num_split, value, name='split')
    # split_dim: A 0-D int32 Tensor. The dimension along which to split. Must be in the range [0, rank(value)).
    # num_split: A Python integer. The number of ways to split.
    # value: The Tensor to split.
    x = tf.split(0, n_steps, x)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    # Multiplies matrix a by matrix b, producing a * b.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
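To make the shape bookkeeping documented above concrete, a small sketch with assumed sizes (batch_size=32, n_steps=28, n_input=28), using the same pre-1.0 `tf.split` argument order as the snippet:

x_demo = tf.zeros([32, 28, 28])            # (batch_size, n_steps, n_input)
x_demo = tf.transpose(x_demo, [1, 0, 2])   # -> (28, 32, 28)
x_demo = tf.reshape(x_demo, [-1, 28])      # -> (896, 28) = (n_steps*batch_size, n_input)
x_demo = tf.split(0, 28, x_demo)           # -> list of 28 tensors, each (32, 28)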
    def create_model(self, data):

        layer = {
            'weights':
            tf.Variable(tf.random_normal([self.rnn_size, self.n_classes])),
            'biases':
            tf.Variable(tf.random_normal([self.n_classes]))
        }

        data = tf.transpose(data, [1, 0, 2])
        data = tf.reshape(data, [-1, self.word_embedding_size])
        data = tf.split(0, self.max_doc_size, data)

        lstm_cell = rnn_cell.BasicLSTMCell(self.rnn_size)
        outputs, states = rnn.rnn(lstm_cell, data, dtype=tf.float32)

        output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

        return output
def recurrent_neural_network(x):
    #input_data * weights + biases
    #biases are needed because if all the input data was zero, no neuron would ever fire

    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(0, n_chunks, x)

    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size)

    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    return output
Example #25
    def baseline_forward(self, X, size, n_class):
        shape = X.get_shape()
        # batch_size x sentence_length x word_length -> sentence_length x batch_size x word_length
        _X = tf.transpose(X, [1, 0, 2])
        _X = tf.reshape(_X, [-1, int(shape[2])])  # (batch_size * sentence_length) x word_length
        seq = tf.split(0, int(shape[1]), _X)  # sentence_length tensors of (batch_size, word_length)

        with tf.name_scope("LSTM"):
            lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=1.0)
            outputs, states = rnn.rnn(lstm_cell, seq, dtype=tf.float32)

        with tf.name_scope("LSTM-Classifier"):
            W = tf.Variable(tf.random_normal([size, n_class]), name="W")
            b = tf.Variable(tf.random_normal([n_class]), name="b")
            output = tf.matmul(outputs[-1], W) + b

        return output
Example #26
def reccurent_neural_network(data):
    layer_output = data

    for layer in network:
        layer.weights = tf.Variable(
            tf.random_normal([rnn_size, number_of_classes]))
        layer.biases = tf.Variable(tf.random_normal([number_of_classes]))

        data = tf.transpose(data, [1, 0, 2])
        data = tf.reshape(data, [-1, chunk_size])
        data = tf.split(0, n_chunks, data)

        lstm_cell = rnn_cell.BasicLSTMCell(rnn_size)
        outputs, states = rnn.rnn(lstm_cell, data, dtype=tf.float32)

        layer_output = layer.activation(
            tf.matmul(outputs[-1], layer.weights) + layer.biases)

    return layer_output
Example #27
    def RNN(x, weights, biases):
        x = tf.transpose(x, [1, 0, 2])
        # was (batch_size, n_steps, n_input); now (n_steps, batch_size, n_input)
        # Reshaping to (n_steps*batch_size, n_input)
        x = tf.reshape(x, [-1, n_input])
        # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
        x = tf.split(0, n_steps, x)
        # Define a lstm cell with tensorflow

        lstm_cell = rnn_cell.BasicLSTMCell(n_hidden,
                                           forget_bias=1.0,
                                           state_is_tuple=True)
        # lstm_cell = rnn_cell.MultiRNNCell(lstm_cell,state_is_tuple=True)

        # Get lstm cell output
        outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

        # Linear activation, using rnn inner loop last output
        return tf.matmul(outputs[-1], weights['out']) + biases['out']
Example #28
def recurrent_neural_network_model(x):
    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    # making data compatible to the input required by rnn cell.
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(0, n_chunks, x)

    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size)

    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

    # matrix multiplication of the final output times the sum of weights and biases
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    return output
Example #29
    def createRNN(self):
        with self.sess.graph.as_default():
            # input layer #
            with tf.name_scope("input"):
                self.s = tf.placeholder("float", [None, DAYS_RANGE, INPUT_DIM], name='input_state')
                s_tran = tf.transpose(self.s, [1, 0, 2])
                s_re = tf.reshape(s_tran, [-1, INPUT_DIM])
                s_list = tf.split(0, DAYS_RANGE, s_re) ## split s to DAYS_RANGE tensor of shape [BATCH, INPUT_DIM]

            lstm_cell = rnn_cell.BasicLSTMCell(1024, forget_bias=1.0, state_is_tuple=True)
            lstm_stack = rnn_cell.MultiRNNCell([lstm_cell]*3, state_is_tuple=True)

            lstm_output, hidden_states = rnn.rnn(lstm_stack, s_list, dtype='float', scope='LSTMStack') # out: [timestep, batch, hidden], state: [cell, c+h, batch, hidden]
                
            h_fc1 = self.FC_layer(lstm_output[-1], [1024, 1024], name='h_fc1', activate=True)
            h_fc2 = self.FC_layer(h_fc1, [1024, ACTIONS], name='h_fc2', activate=False)

            # output layer
            self.pred_action = tf.nn.softmax(h_fc2)
def recurrent_neural_network_model(data):
    # (input_data  * weights) + biases
    # if all the input_data is 0, then thanks to the biases at least some neurons still fire
    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    data = tf.transpose(data, [1, 0, 2])
    # -1 lets TensorFlow infer this dimension from the total number of elements
    data = tf.reshape(data, [-1, chunk_size])
    data = tf.split(data, n_chunks, 0)  # TF 1.x split signature, matching rnn.static_rnn below

    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size)
    outputs, state = rnn.static_rnn(lstm_cell, data, dtype=tf.float32)

    output = tf.add(tf.matmul(outputs[-1], layer['weights']), layer['biases'])
    output = tf.nn.relu(output)

    return output