# Select the embedding source: random initialisation or the embedding named on the command line.
if args.embedding == 'random':
    embedding = 'random'
else:
    embedding = args.embedding

model_name = args.model
print(model_name)

# Dynamically import the model module matching the requested name from the models package.
x = import_module('models.' + model_name)

# Build the base config for this dataset/output directory, then override it with the CLI arguments.
config = Config(dataset, outputdir, embedding)
config.model_name = args.model
config.save_path = os.path.join(outputdir, args.model + '.ckpt')
config.log_path = os.path.join(outputdir, args.model + '.log')
config.dropout = float(args.dropout)
config.require_improvement = int(args.require_improvement)
config.num_epochs = int(args.num_epochs)
config.batch_size = int(args.batch_size)
config.max_length = int(args.max_length)
config.learning_rate = float(args.learning_rate)
config.embed = int(args.embed_dim)
config.bucket = int(args.bucket)
config.wordNgrams = int(args.wordNgrams)
config.lr_decay_rate = float(args.lr_decay_rate)

# Load and split the data, timing how long the build takes.
start_time = time.time()
print("Loading data...")
vocab, train_data, dev_data, test_data = build_dataset(
    config, args.use_word_segment, min_freq=int(args.min_freq))
time_dif = get_time_dif(start_time)
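
# A hypothetical invocation of this script, sketched for reference only. The flag names
# mirror the args fields read above; the script filename, the model name, and the values
# shown are assumptions, since the argparse definitions live elsewhere in the file:
#
#   python run.py --model FastText --embedding random --dropout 0.5 \
#       --num_epochs 20 --batch_size 128 --max_length 32 --learning_rate 1e-3 \
#       --embed_dim 300 --bucket 250499 --wordNgrams 2 --lr_decay_rate 0.9 \
#       --require_improvement 1000 --min_freq 1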