def __init__(self, mode='chat'):
    """Make sure data and checkpoint folders exist, then optionally set up chat.

    Args:
        mode: when 'chat', runs the private chat initialisation after setup.
    """
    # Build the processed dataset only if its output directory is absent.
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)
    if mode == "chat":
        self.__chat_init()
def main():
    """Prepare the processed dataset and the checkpoint directory.

    Preprocessing is skipped when its output directory already exists.
    """
    # Removed a commented-out argparse block (dead code): this variant of
    # main() never dispatched on a mode, so the parser was never used.
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data ready!')
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)
def main():
    """Parse --mode, make sure data is processed, then train or chat."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--mode', choices={'train', 'chat'}, default='train',
                     help="mode. if not specified, it's in the train mode")
    args = cli.parse_args()

    # One-time preprocessing: only runs when the output dir is missing.
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data ready!')
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)

    # Dispatch table instead of an if/elif chain; argparse's `choices`
    # guarantees args.mode is one of the two keys.
    actions = {'train': train, 'chat': chat}
    action = actions.get(args.mode)
    if action is not None:
        action()
def main():
    """Entry point: preprocess data if needed, then run train or chat mode."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--mode', choices={'train', 'chat'},
                            default='train',
                            help="mode if not specified its in train mode")
    cli_args = arg_parser.parse_args()

    # Skip preprocessing when the processed output is already on disk.
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data Ready!')
    # Ensure the checkpoint folder exists before anything tries to save.
    data.make_dir(config.CPT_PATH)

    chosen = cli_args.mode
    if chosen == 'chat':
        chat()
    elif chosen == 'train':
        train()
def main():
    """Prepare the dataset if necessary, then start the selected mode."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode',
                        choices={'train', 'chat'},
                        default='train',
                        help="mode. if not specified, it's in the train mode")
    options = parser.parse_args()

    processed_exists = os.path.isdir(config.PROCESSED_PATH)
    if not processed_exists:
        # First run: build the processed dataset from the raw corpus.
        data.prepare_raw_data()
        data.process_data()
    print("Data ready, starting application")
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)

    mode = options.mode
    if mode == 'train':
        train()
    elif mode == 'chat':
        chat()
def main():
    """Prepare data, then run train, test (BLEU), or translate mode.

    The mode is read from the command line and defaults to 'train'.
    """
    parser = argparse.ArgumentParser()
    # FIX: a positional argument without nargs='?' is *required*, so the
    # declared default='train' was silently ignored and the help text
    # ("if not specified ...") was wrong. nargs='?' makes it optional,
    # letting the default actually apply.
    parser.add_argument('mode', nargs='?',
                        choices={'train', 'test', 'translate'},
                        default='train',
                        help="mode. if not specified, it's in the train mode")
    args = parser.parse_args()
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data ready!')
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)
    if args.mode == 'train':
        train()
    elif args.mode == 'test':
        # The BLEU scores were previously bound to an unused local; the
        # call's side effects are what matter here.
        test()
    elif args.mode == 'translate':
        translate()
def main():
    """Prepare data, then interactively ask whether to train or chat.

    The mode is read from stdin rather than the command line.
    """
    # Removed two triple-quoted string literals that held commented-out
    # argparse code (dead code kept as runtime string expressions).
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data ready!')
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)
    mode = input("Input mode (train|chat): ")
    # Any answer other than exactly 'train' falls through to chat mode.
    if mode == 'train':
        train()
    else:
        chat()
def start_training():
    # Prepare the dataset (only if the processed output is missing), ensure
    # the checkpoint folder exists, then run the infinite training loop:
    # sample a bucket, run one optimisation step, and periodically log
    # progress, save a checkpoint, and evaluate on the held-out buckets.
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data ready!')
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)
    """ Train the bot """
    # Bucketed data plus the cumulative scale used for random bucket sampling.
    test_buckets, data_buckets, train_buckets_scale = _get_buckets()
    # in train mode, we need to create the backward path, so forward_only is False
    model = Seq2SeqModel(False, config.BATCH_SIZE)
    model.build_graph()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        print('Running session')
        sess.run(tf.global_variables_initializer())
        # Resume from the latest checkpoint if one exists.
        _check_restore_parameters(sess, saver)

        iteration = model.global_step.eval()
        total_loss = 0
        # Infinite loop
        print('Start training ...')
        # Append-mode record files so repeated runs accumulate history.
        train_record_file = open(
            os.path.join(config.PROCESSED_PATH, config.TRAINING_RECORD_FILE),
            'a+')
        test_record_file = open(
            os.path.join(config.PROCESSED_PATH, config.TESTING_RECORD_FILE),
            'a+')
        while True:
            try:
                # How often to report/checkpoint grows with the iteration count.
                skip_step = _get_skip_step(iteration)
                bucket_id = _get_random_bucket(train_buckets_scale)
                encoder_inputs, decoder_inputs, decoder_masks = data.get_batch(
                    data_buckets[bucket_id],
                    bucket_id,
                    batch_size=config.BATCH_SIZE)
                start = time.time()
                # forward_only=False: this is a training (backward-pass) step.
                _, step_loss, _ = run_step(sess, model, encoder_inputs,
                                           decoder_inputs, decoder_masks,
                                           bucket_id, False)
                total_loss += step_loss
                iteration += 1

                if iteration % skip_step == 0:
                    # Report the average loss since the last report.
                    _train_info = 'Iter {}: loss {}, time {}'.format(
                        iteration, total_loss / skip_step,
                        time.time() - start)
                    print(_train_info)
                    train_record_file.write(_train_info + '\n')
                    start = time.time()
                    total_loss = 0
                    saver.save(sess,
                               os.path.join(config.CPT_PATH, 'chatbot'),
                               global_step=model.global_step)
                    if iteration % (10 * skip_step) == 0:
                        # Run evals on development set and print their loss
                        _test_info = _eval_test_set(sess, model, test_buckets)
                        for item in _test_info:
                            print(item)
                            test_record_file.write("%s\n" % item)
                        start = time.time()
                    sys.stdout.flush()
            except KeyboardInterrupt:
                # NOTE(review): the record files are closed here but there is
                # no `break`, so the `while True` loop continues and the next
                # write to a closed file would raise — confirm whether a
                # `break` (or re-raise) was intended.
                print('Interrupted by user at iteration {}'.format(iteration))
                train_record_file.close()
                test_record_file.close()