# Dispatch on run mode: interactive CLI dialog and/or Telegram bot.
if config['cli_dialog']:
    env.cli_dialog(
        restore_path=restore_path,
        log_file=os.path.expanduser(config['cli_dialog_file']),
        vocabulary=vocabulary,
        character_positions_in_vocabulary=cpiv,
        batch_generator_class=BatchGenerator,
        # if True after bot answer hidden state is reset
        reset_state_after_model_answer=args.reset_state_after_model_answer,
        # if False and log_file already exists logs are put in to log_file + `#[index]`
        append_logs=False,
        # max number of characters in bot answer
        answer_len_limit=500.,
        # if True model hidden state is initialized with random numbers
        randomize=not config['do_not_randomize_hidden_state'],
        preprocess_f=preprocess_f,
        postprocess_f=postprocess_f,
        temperature=config['temperature'],
    )

# print('before telegram method')
if config['telegram_bot']:
    env.telegram(
        kwargs_for_building=kwargs_for_model_building,
        restore_path=restore_path,
        vocabulary=vocabulary,
        character_positions_in_vocabulary=cpiv,
        batch_generator_class=BatchGenerator,
        log_path=config['telegram_log_path'],
        build=False,
        temperature=config['temperature'],
    )
# Build the character vocabulary and an inference Environment, then launch
# the Telegram bot from a saved checkpoint.
vocabulary = create_vocabulary(text)
vocabulary_size = len(vocabulary)

env = Environment(Lstm, LstmBatchGenerator, vocabulary=vocabulary)
cpiv = get_positions_in_vocabulary(vocabulary)

# Model construction arguments (inference regime, memory-limited build).
kwargs_for_building = dict(
    batch_size=64,
    num_layers=2,
    num_nodes=[300, 300],
    num_output_layers=2,
    num_output_nodes=[124],
    vocabulary_size=vocabulary_size,
    # dim_compressed=10,
    num_unrollings=30,
    init_parameter=3.,
    regularization_rate=.00001,
    regime='inference',
    going_to_limit_memory=True)

# Dropout feeds: keep-prob 0.9 for training, 1.0 (no dropout) for inference.
add_feed = [{'placeholder': 'dropout', 'value': 0.9}]
valid_add_feed = [{'placeholder': 'dropout', 'value': 1.}]

env.telegram(
    kwargs_for_building,
    'debugging_lstm_and_gru/first/checkpoints/10ZZXXZZZZZZZZZZZZ0',
    'telegram/debug',
    vocabulary,
    cpiv,
    LstmBatchGenerator,
    additions_to_feed_dict=valid_add_feed)