def main():
    import problem_unittests as tests

    tests.test_get_init_cell(get_init_cell)
    tests.test_get_embed(get_embed)
    tests.test_build_rnn(build_rnn)
    tests.test_build_nn(build_nn)
    tests.test_get_batches(get_batches)
    tests.test_get_tensors(get_tensors)
    tests.test_pick_word(pick_word)

    print(get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
                      batch_size=3,
                      seq_length=2))
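    # Under the usual project spec (an assumption here; get_batches is
    # defined elsewhere), the call above returns a NumPy array of shape
    # (number of batches, 2, batch size, sequence length): with 20 tokens,
    # batch_size=3 and seq_length=2 that is 20 // (3 * 2) = 3 batches of
    # input/target pairs, and leftover tokens are dropped.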


def run_test():

    import problem_unittests as t

    t.test_create_lookup_tables(create_lookup_tables)
    t.test_get_batches(get_batches)
    t.test_tokenize(token_lookup)
    t.test_get_inputs(get_inputs)
    t.test_get_init_cell(get_init_cell)
    t.test_get_embed(get_embed)
    t.test_build_rnn(build_rnn)
    t.test_build_nn(build_nn)
    t.test_get_tensors(get_tensors)
    t.test_pick_word(pick_word)
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initial_state)
    """
    # TODO: Implement Function
    layers = 1
    keep_prob = 1  # a keep probability of 1 makes the dropout wrapper a no-op
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    # Reusing the same wrapped cell is only safe because layers == 1;
    # stacks with layers > 1 need a separate cell object per layer.
    cell = tf.contrib.rnn.MultiRNNCell([drop] * layers)

    initial_state = cell.zero_state(batch_size, tf.float32)
    initial_state = tf.identity(initial_state, name='initial_state')
    return (cell, initial_state)


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
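# A sketch of how the (cell, initial_state) pair above is typically consumed
# later in the project (assumed usage; the actual build_rnn lives in a later
# cell):
#
#     outputs, final_state = tf.nn.dynamic_rnn(cell, embed, dtype=tf.float32)
#     final_state = tf.identity(final_state, name='final_state')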

# ### Word Embedding
# Apply embedding to `input_data` using TensorFlow.  Return the embedded sequence.
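# A minimal sketch of one common TF 1.x approach (the project's own get_embed
# implementation is in the next cell): index a trainable
# (vocab_size, embed_dim) matrix with tf.nn.embedding_lookup.
#
#     embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
#     embed = tf.nn.embedding_lookup(embedding, input_data)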

# In[49]:


def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions.
    :return: Embedded input.
    """