def run_test():
    # Run every project unit test against the functions defined in this script.
    import problem_unittests as t
    t.test_create_lookup_tables(create_lookup_tables)
    t.test_get_batches(get_batches)
    t.test_tokenize(token_lookup)
    t.test_get_inputs(get_inputs)
    t.test_get_init_cell(get_init_cell)
    t.test_get_embed(get_embed)
    t.test_build_rnn(build_rnn)
    t.test_build_nn(build_nn)
    t.test_get_tensors(get_tensors)
    t.test_pick_word(pick_word)
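# A minimal sketch of create_lookup_tables, which run_test exercises above and
# helper.preprocess_and_save_data consumes below. The signature (a list of
# words in, a (vocab_to_int, int_to_vocab) pair out) is an assumption inferred
# from the test and variable names, not confirmed by this excerpt.
def create_lookup_tables(text):
    # Give each unique word an integer id and build both directions of the
    # mapping; id order is arbitrary in this sketch.
    vocab = set(text)
    vocab_to_int = {word: i for i, word in enumerate(vocab)}
    int_to_vocab = {i: word for word, i in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab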
""" tokens = dict() tokens['.'] = '<PERIOD>' tokens[','] = '<COMMA>' tokens['"'] = '<QUOTATION_MARK>' tokens[';'] = '<SEMICOLON>' tokens['!'] = '<EXCLAMATION_MARK>' tokens['?'] = '<QUESTION_MARK>' tokens['('] = '<LEFT_PAREN>' tokens[')'] = '<RIGHT_PAREN>' tokens['?'] = '<QUESTION_MARK>' tokens['-'] = '<HYPHEN>' tokens['\n'] = '<NEW_LINE>' #tokens[':'] = '<COLON>' return tokens tests.test_tokenize(token_lookup) ################################################# ## Pre-process all the data and save it ################################################# # pre-process training data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) ################################################# ## Check Point ################################################# int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() ################################################# ## Build the Neural Network
def test_create_punctuation_map(self):
    test_tokenize(token_lookup=create_punctuation_map)
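# A sketch of how the method above could sit in a runnable harness. The
# TestPreprocessing class name and the alias below are hypothetical; only the
# method body comes from the source, and test_tokenize is the checker this
# project already calls from problem_unittests.
import unittest
from problem_unittests import test_tokenize

create_punctuation_map = token_lookup  # assumed alias for the function above

class TestPreprocessing(unittest.TestCase):
    def test_create_punctuation_map(self):
        test_tokenize(token_lookup=create_punctuation_map)

if __name__ == '__main__':
    unittest.main()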