Example #1
def RNN(x, weights, biases):
    with WeightsInitializer(initializer=init_ops.Constant(0.1)):
        bw_cell = LSTMCell(n_hidden)
        fw_cell = LSTMCell(n_hidden)
        result, state = bidirectional_dynamic_rnn(fw_cell, bw_cell,
                                                  symbols_in_keys)
    "Dense in this case should be out of WeightsInitializer scope because we are passing constants"
    out_l = Dense(10,
                  kernel_initializer=init_ops.Constant(out_weights),
                  bias_initializer=init_ops.Constant(out_biases))
    fw_result, bw_result = result
    # Concatenate the forward and backward outputs along the feature axis,
    # so the last step carries 2 * n_hidden values.
    h = np.concatenate((fw_result, bw_result), -1)
    # The reshape assumes vocab_size == 2 * n_hidden.
    pred = out_l(h[0][-1].reshape(1, vocab_size))
    return pred
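The Dense layer at the end consumes the concatenation of the forward and backward outputs. A minimal pure-NumPy sketch of that shape bookkeeping (the shapes here are hypothetical, chosen so that vocab_size == 2 * n_hidden, as the reshape above requires):

import numpy as np

n_hidden, seq_len, vocab_size = 5, 3, 10
fw_result = np.zeros((1, seq_len, n_hidden))  # (batch, time, n_hidden)
bw_result = np.ones((1, seq_len, n_hidden))   # (batch, time, n_hidden)
h = np.concatenate((fw_result, bw_result), -1)
print(h.shape)                                # (1, 3, 10): feature axis doubled
print(h[0][-1].reshape(1, vocab_size).shape)  # (1, 10), the Dense input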
Example #2
def encoding_layer(rnn_cell_size, sequence_len, n_layers, rnn_inputs, dropout_prob):
    # encoder_type and debug are assumed to be defined at module level.
    if encoder_type == "bi" and n_layers % 2 == 0:
        n_bi_layer = int(n_layers / 2)
        encoding_output, encoding_state = bidirectional_dynamic_rnn(
            get_rnn_cell(rnn_cell_size, dropout_prob, n_bi_layer, debug),
            get_rnn_cell(rnn_cell_size, dropout_prob, n_bi_layer, debug),
            rnn_inputs)
        print("encoding_state:", encoding_state)
        if n_bi_layer > 1:
            # n_layers / 2 bidirectional layers: interleave the per-layer
            # forward and backward states below.
            """
            Forward-First(0)
            ((LSTMStateTuple({'c': array([[0.30450274, 0.30450274, 0.30450274, 0.30450274, 0.30450274]]),
              'h': array([[0.16661529, 0.16661529, 0.16661529, 0.16661529, 0.16661529]])}),
            Forward-Second(1)
             LSTMStateTuple({'c': array([[0.27710986, 0.07844026, 0.18714019, 0.28426586, 0.28426586]]),
              'h': array([[0.15019765, 0.04329417, 0.10251247, 0.1539225 , 0.1539225 ]])})),
            Backward-First(0)
            (LSTMStateTuple({'c': array([[0.30499766, 0.30499766, 0.30499766, 0.30499766, 0.30499766]]),
              'h': array([[0.16688152, 0.16688152, 0.16688152, 0.16688152, 0.16688152]])}),
            Backward-Second(1)
            LSTMStateTuple({'c': array([[0.25328871, 0.17537864, 0.21700339, 0.25627687, 0.25627687]]),
              'h': array([[0.13779658, 0.09631104, 0.11861721, 0.1393639 , 0.1393639 ]])})))
            """
            encoder_state = []
            for layer_id in range(n_bi_layer):
                encoder_state.append(encoding_state[0][layer_id])  # forward
                encoder_state.append(encoding_state[1][layer_id])  # backward
            encoding_state = tuple(encoder_state)
            """
            First(0)
            ((LSTMStateTuple({'c': array([[0.30450274, 0.30450274, 0.30450274, 0.30450274, 0.30450274]]),
               'h': array([[0.16661529, 0.16661529, 0.16661529, 0.16661529, 0.16661529]])}),
            Second(1)
            LSTMStateTuple({'c': array([[0.30499766, 0.30499766, 0.30499766, 0.30499766, 0.30499766]]),
               'h': array([[0.16688152, 0.16688152, 0.16688152, 0.16688152, 0.16688152]])})),
            Third(2)
            (LSTMStateTuple({'c': array([[0.27710986, 0.07844026, 0.18714019, 0.28426586, 0.28426586]]),
               'h': array([[0.15019765, 0.04329417, 0.10251247, 0.1539225 , 0.1539225 ]])}),
            Fourth(3)
            LSTMStateTuple({'c': array([[0.25328871, 0.17537864, 0.21700339, 0.25627687, 0.25627687]]),
               'h': array([[0.13779658, 0.09631104, 0.11861721, 0.1393639 , 0.1393639 ]])})))
            """
    else:
        encoding_output, encoding_state = dynamic_rnn(
            get_rnn_cell(rnn_cell_size, dropout_prob, n_layers, debug),
            rnn_inputs)
    return encoding_output, encoding_state
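The reordering loop above flattens ((forward_0, forward_1), (backward_0, backward_1)) into (forward_0, backward_0, forward_1, backward_1), exactly as the two commented state dumps show. A minimal sketch of the same logic, with plain strings standing in for LSTMStateTuple objects:

encoding_state = (("f0", "f1"), ("b0", "b1"))  # (forward states, backward states)
n_bi_layer = 2
encoder_state = []
for layer_id in range(n_bi_layer):
    encoder_state.append(encoding_state[0][layer_id])  # forward
    encoder_state.append(encoding_state[1][layer_id])  # backward
print(tuple(encoder_state))  # ('f0', 'b0', 'f1', 'b1')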
Example #3
def RNN(x, weights, biases):
    fw_cell_list = []
    bw_cell_list = []
    for i in range(n_layers):
        with WeightsInitializer(initializer=init_ops.Constant(0.1)):
            fw_cell_list.append(LSTMCell(n_hidden, debug=True))
            bw_cell_list.append(LSTMCell(n_hidden, debug=True))
    fw_cell = MultiRNNCell(fw_cell_list)
    bw_cell = MultiRNNCell(bw_cell_list)
    result, state = bidirectional_dynamic_rnn(fw_cell, bw_cell,
                                              symbols_in_keys)
    "Dense in this case should be out of WeightsInitializer scope because we are passing constants"
    out_l = Dense(10,
                  kernel_initializer=init_ops.Constant(out_weights),
                  bias_initializer=init_ops.Constant(out_biases))
    fw_result, bw_result = result
    h = np.concatenate((fw_result, bw_result), -1)
    pred = out_l(h[0][-1].reshape(1, vocab_size))
    print("pred:", pred)
    return pred
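Each loop iteration builds a fresh pair of cells, so every layer gets its own weights; MultiRNNCell then chains them into one stacked cell per direction. A simplified pure-Python sketch of what such a multi-layer wrapper does per step (hypothetical, not the library's actual implementation):

class SimpleMultiCell:
    def __init__(self, cells):
        self.cells = cells

    def __call__(self, x, states):
        # Thread the input through each layer's cell, carrying one state per layer.
        new_states = []
        for cell, state in zip(self.cells, states):
            x, state = cell(x, state)  # output of layer i feeds layer i + 1
            new_states.append(state)
        return x, tuple(new_states)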
Example #4
out_l = Dense(10,
              kernel_initializer=init_ops.Constant(out_weights),
              bias_initializer=init_ops.Constant(out_biases))

while step < training_iters:
    if offset > (len(train_data) - end_offset):
        offset = rnd.randint(0, n_input + 1)
    print("offset:", offset)
    # One-hot encode the n_input symbols in the current window.
    symbols_in_keys = [
        input_one_hot(dictionary[str(train_data[i])], vocab_size)
        for i in range(offset, offset + n_input)
    ]
    symbols_in_keys = np.reshape(np.array(symbols_in_keys),
                                 [-1, n_input, vocab_size])
    # The symbol immediately after the window is the prediction target.
    target = dictionary[str(train_data[offset + n_input])]

    #cell = LSTMCell(vocab_size, n_hidden, w=weights, debug=True)
    result, state = bidirectional_dynamic_rnn(fw_cell, bw_cell,
                                              symbols_in_keys)
    #(c, h) = state
    print("finalresult:", result)
    print("finalstate:", state)
    fw_result, bw_result = result
    print("fw_result:", fw_result)
    print("bw_result:", bw_result)
    #print(":",fw_result.shape)
    print("cc:", np.concatenate((fw_result, bw_result), -1), ":",
          np.concatenate((fw_result, bw_result), -1).shape)
    #print("out_weights:",out_weights," out_biases:",out_biases)
    h = np.concatenate((fw_result, bw_result), -1)
    print("h:", h[0][-1])
    pred = out_l(h[0][-1].reshape(1, vocab_size))
    print("pred:", pred)