def main():
    """Starting point of the application: build model/dataset and dispatch exec modes."""
    hvd.init()
    params = parse_args(PARSER.parse_args())
    set_flags(params)
    params.model_dir = prepare_model_dir(params)
    logger = get_logger(params)

    model = Unet()
    dataset = Dataset(
        data_dir=params.data_dir,
        batch_size=params.batch_size,
        fold=params.crossvalidation_idx,
        augment=params.augment,
        gpu_id=hvd.rank(),
        num_gpus=hvd.size(),
        seed=params.seed,
    )

    # Training runs on every Horovod rank; evaluation and prediction only on rank 0.
    if 'train' in params.exec_mode:
        train(params, model, dataset, logger)
    if hvd.rank() == 0:
        if 'evaluate' in params.exec_mode:
            evaluate(params, model, dataset, logger)
        if 'predict' in params.exec_mode:
            predict(params, model, dataset, logger)
def evaluate():
    """Interactively collect an evaluation range, optionally analyze the
    current functions for points of interest, then run the evaluation."""
    if ask("Create & view more functions", default=False):
        main()
    else:
        ask_to_add_taylor_polynomial()

    min_x = prompt("Min x", expect=float)
    max_x = prompt("Max x", expect=float, optional=True, fast=False)
    delta = prompt("Evaluation delta", expect=float, optional=True, fast=False)

    # Analysis is best-effort: any failure is reported and we fall back to no points.
    try:
        gathered = analyze_functions() if ask("Analyze functions") else []
    except Exception as err:
        print(err)
        gathered = []
    # analyze_functions() may hand back None; normalize to an empty list.
    points = [] if gathered is None else gathered

    run.evaluate(functions, min_x=min_x, max_x=max_x, delta=delta, points=points)
def main():
    """Load a fine-tuned MRC model and evaluate it on the test set.

    Reads CLI args (including the checkpoint path), builds tokenizer,
    dataset and dataloader, restores the model weights, and runs
    `evaluate` with prefix "test".
    """
    parser = get_argparse()
    # NOTE(review): "fine_tunning" is a typo for "fine_tuning", but the flag
    # name is part of the CLI interface, so it is kept for compatibility.
    parser.add_argument("--fine_tunning_model", type=str, required=True,
                        help="fine_tuning model path")
    args = parser.parse_args()
    print(json.dumps(vars(args), sort_keys=True, indent=4,
                     separators=(', ', ': '), ensure_ascii=False))
    # NOTE(review): the timestamp contains spaces and colons, which is not a
    # valid filename on Windows — confirm this only runs on POSIX systems.
    init_logger(log_file="./log/{}.log".format(
        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    seed_everything(args.seed)
    # Save path: makedirs(exist_ok=True) is race-free and, unlike os.mkdir,
    # also creates missing parent directories.
    os.makedirs(args.output_dir, exist_ok=True)
    # device
    args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # tokenizer
    tokenizer = BertTokenizerFast.from_pretrained(args.model_name_or_path)
    # Dataset & Dataloader
    test_dataset = MrcDataset(args, json_path="./data/test1.json",
                              tokenizer=tokenizer)
    test_iter = DataLoader(test_dataset,
                           shuffle=False,
                           batch_size=args.per_gpu_eval_batch_size,
                           collate_fn=collate_fn,
                           num_workers=24)
    logger.info("The nums of the test_dataset examples is {}".format(
        len(test_dataset.examples)))
    logger.info("The nums of the test_dataset features is {}".format(
        len(test_dataset)))
    # model
    model = MRC_model(args.model_name_or_path)
    model.to(args.device)
    # map_location lets a GPU-saved checkpoint load on a CPU-only host too.
    model.load_state_dict(torch.load(args.fine_tunning_model,
                                     map_location=args.device))
    # predict test
    model.eval()
    evaluate(args, test_iter, model, prefix="test")
def main():
    """Run the whole network based on the "conf" dictionary from config().

    First, the network is trained and checked against a development set at
    the preferred increments, then the training progress is plotted, before
    the final evaluation is done on all three data sets. If conf["keras"]
    is truthy, a Keras reference run is done instead and the process exits.
    """
    # Get parameters from config() function
    conf = config()
    # All data split into three sets. Format: (datasize, channels, height, width).
    X_train, Y_train, X_devel, Y_devel, X_test, Y_test = get_data(conf)
    # Test with keras (idiomatic truthiness instead of "== True")
    if conf["keras"]:
        train_progress, devel_progress = run.kerasnet(conf, X_train, Y_train,
                                                      X_devel, Y_devel,
                                                      X_test, Y_test)
        plot_progress(conf, train_progress, devel_progress)
        sys.exit()
    # Run training and save weights and biases in params_dnn and params_cnn.
    conf, params_dnn, params_cnn, train_progress, devel_progress = run.train(
        conf,
        X_train,
        Y_train,
        X_devel,
        Y_devel,
    )
    # Plot the progress of the network over training steps
    plot_progress(conf, train_progress, devel_progress)
    # Evaluate the network on all three data sets. When output=True the
    # predictions made on the test set are saved.
    _report(conf, params_dnn, params_cnn, "train", X_train, Y_train)
    _report(conf, params_dnn, params_cnn, "development", X_devel, Y_devel)
    _report(conf, params_dnn, params_cnn, "test", X_test, Y_test,
            output=conf["output"])


def _report(conf, params_dnn, params_cnn, name, X, Y, **kwargs):
    """Evaluate one data split and print its correct-classification ratio."""
    print("Evaluating {} set".format(name))
    num_correct, num_evaluated = run.evaluate(conf, params_dnn, params_cnn,
                                              X, Y, **kwargs)
    print("CCR = {0:>5} / {1:>5} = {2:>6.4f}".format(
        num_correct, num_evaluated, num_correct / num_evaluated))
def main():
    """Run the program according to specified configurations.

    Trains the network, plots training/development progress, then prints
    the correct-classification ratio (CCR) for all three splits.
    """
    conf = config()
    X_train, Y_train, X_devel, Y_devel, X_test, Y_test = get_data(conf)
    params, train_progress, devel_progress = run.train(conf, X_train, Y_train,
                                                       X_devel, Y_devel)
    plot_progress(train_progress, devel_progress)
    # One loop instead of three copy-pasted evaluate/print blocks.
    for name, X, Y in (("train", X_train, Y_train),
                       ("development", X_devel, Y_devel),
                       ("test", X_test, Y_test)):
        print("Evaluating {} set".format(name))
        num_correct, num_evaluated = run.evaluate(conf, params, X, Y)
        print("CCR = {0:>5} / {1:>5} = {2:>6.4f}".format(
            num_correct, num_evaluated, num_correct / num_evaluated))
def eval_model(name, seed2model, test_data):
    """Evaluate one model variant across three fixed seeds.

    Returns (mean, std) of the test accuracies over seeds "0", "42", "420".
    """
    # Tree-LSTM variants need their own minibatch preparation; invariant, so hoisted.
    prep = (run.prepare_treelstm_minibatch
            if "tree_lstm" in name
            else run.prepare_minibatch)
    accs = []
    for s in ("0", "42", "420"):
        torch.manual_seed(s)
        _, _, _, acc = run.evaluate(
            seed2model[s],
            test_data,
            loss_fn=torch.nn.CrossEntropyLoss(),
            batch_size=run.BATCH_SIZE,
            batch_fn=run.get_minibatch,
            prep_fn=prep,
        )
        accs.append(acc)
    return np.mean(accs), np.std(accs)
sys.stdout.write("Versions: \n") sys.stdout.write(" CUDA: %s\n" % torch.version.cuda) sys.stdout.write(" cuDNN: %s\n" % torch.backends.cudnn.version()) sys.stdout.write("Memory Usage:\n") sys.stdout.write( " Max Alloc: %g GB\n" % round(torch.cuda.max_memory_allocated(cur_device) / 1024**3, 1)) sys.stdout.write( " Allocated: %g GB\n" % round(torch.cuda.memory_allocated(cur_device) / 1024**3, 1)) sys.stdout.write( " Cached: %g GB\n" % round(torch.cuda.memory_reserved(cur_device) / 1024**3, 1)) sys.stdout.write("\n") else: device = 'cpu' sys.stdout.write("You are using CPU.\n") method = "2" data_path = "../data/WA_Fn-UseC_-HR-Employee-Attrition.csv" sys.stdout.write("You chose the method %s.\n" % method) sys.stdout.write("Data path: %s\n" % data_path) sys.stdout.write('\n') run.init(device, data_path, "method_" + method) run.train() accuracy = run.evaluate("valid") sys.stdout.write("Accuracy: %.2f%%\n" % (accuracy * 100))
# model.shuffle_pair() # eval_losses_meters.append(eval_loss_meter) # eval_accuracy_meters.append(eval_acc_meter) # eval_entropy_meters.append(eval_entropy_meter) # eval_distinctness_meters.append(eval_distinctness_meter) # eval_rsa_sr_meters.append(eval_rsa_sr_meter) # eval_rsa_si_meters.append(eval_rsa_si_meter) # eval_rsa_ri_meters.append(eval_rsa_ri_meter) # eval_topological_sim_meters.append(eval_topological_sim_meter) # eval_posdis_meters.append(eval_posdis_meter) # eval_bosdis_meters.append(eval_bosdis_meter) # eval_language_entropy_meters.append(eval_lang_entropy_meter) (_, noise_acc_meter, _, _, _, _, _, _, _, _, _, _, _, _) = evaluate(model, noise_data, eval_word_counts, noise_metadata, debugging) noise_accuracy_meters.append(noise_acc_meter) model.shuffle_pair() print( 'Epoch {}, average train loss: {}, \n average accuracy: {},, average noise accuracy: {} \n' .format(e, losses_meters[e].avg, accuracy_meters[e].avg, noise_accuracy_meters[e].avg)) # if rsa_sampling > 0: # print(' RSA sender-receiver: {}, RSA sender-input: {}, RSA receiver-input: {} \n Topological sim: {} \n'.format( # epoch_rsa_sr_meter.avg, epoch_rsa_si_meter.avg, epoch_rsa_ri_meter.avg, epoch_topological_sim_meter.avg)) # print(' Train posdis: {}, Train posdis: {}, Eval posdis: {}, Eval bosdis: {}'.format( # epoch_bosdis_meter.avg, epoch_bosdis_meter.avg, eval_posdis_meter.avg, eval_bosdis_meter.avg)) # print(' Eval RSA sender-receiver: {}, Eval RSA sender-input: {}, Eval RSA receiver-input: {}\n Eval Topological sim: {}\n'.format( # eval_rsa_sr_meter.avg, eval_rsa_si_meter.avg, eval_rsa_ri_meter.avg, eval_topological_sim_meter.avg)) wandb.log({'average noise accuracy': noise_accuracy_meters[e].avg},
def test_one_a(self):
    """Guess "2179" vs answer [2,0,4,8]: exactly one digit right, none misplaced."""
    self.assertEqual(evaluate("2179", [2, 0, 4, 8]), [1, 0])
def test_two_b(self):
    """Guess "9214" vs answer [2,0,4,8]: no exact hits, two digits misplaced."""
    self.assertEqual(evaluate("9214", [2, 0, 4, 8]), [0, 2])
def main():
    """Starting point of the application: configure TF/Horovod and dispatch exec modes."""
    flags = PARSER.parse_args()
    params = _cmd_params(flags)
    backends = [StdOutBackend(Verbosity.VERBOSE)]
    if params.log_dir is not None:
        backends.append(JSONStreamBackend(Verbosity.VERBOSE, params.log_dir))
    logger = Logger(backends)

    # Optimization flags. These TF switches are boolean '0'/'1' values; the
    # original set several of them to the string 'data', which is not a valid
    # value for these flags — fixed to '1' (enabled) per the usual reference
    # configuration.
    os.environ['CUDA_CACHE_DISABLE'] = '0'
    os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
    os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
    os.environ['TF_ADJUST_HUE_FUSED'] = '1'
    os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
    os.environ['TF_SYNC_ON_FINISH'] = '0'
    os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'

    hvd.init()
    if params.use_xla:
        tf.config.optimizer.set_jit(True)

    # Pin each Horovod worker to its own GPU and grow memory on demand.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    if gpus:
        tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')

    if params.use_amp:
        tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
    else:
        os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'

    # Build the model
    model = Unet()
    dataset = Dataset(data_dir=params.data_dir,
                      batch_size=params.batch_size,
                      fold=params.crossvalidation_idx,
                      augment=params.augment,
                      gpu_id=hvd.rank(),
                      num_gpus=hvd.size(),
                      seed=params.seed)

    # Training runs on all ranks; evaluation/prediction restore the
    # checkpoint and run on rank 0 only.
    if 'train' in params.exec_mode:
        train(params, model, dataset, logger)
    if 'evaluate' in params.exec_mode:
        if hvd.rank() == 0:
            model = restore_checkpoint(model, params.model_dir)
            evaluate(params, model, dataset, logger)
    if 'predict' in params.exec_mode:
        if hvd.rank() == 0:
            model = restore_checkpoint(model, params.model_dir)
            predict(params, model, dataset, logger)
def test_correct(self):
    """Guess "2048" vs answer [2,0,4,8]: all four digits exactly right."""
    self.assertEqual(evaluate("2048", [2, 0, 4, 8]), [4, 0])
def eval_():
    """Build config, environment and agent, then run a single evaluation."""
    cfg = MlConfig(agent_name="abcd-agent")
    provider = DataProvider(cfg.db)
    env = StockEnvironment(provider, cfg.max_step_per_episode, 0)
    evaluate(cfg, provider, 0, load_agent(cfg, env, None))
# eval_lang_entropy_meter) = evaluate(model, valid_data, eval_word_counts, valid_metadata, debugging) # # eval_losses_meters.append(eval_loss_meter) # eval_accuracy_meters.append(eval_acc_meter) # eval_entropy_meters.append(eval_entropy_meter) # eval_distinctness_meters.append(eval_distinctness_meter) # eval_rsa_sr_meters.append(eval_rsa_sr_meter) # eval_rsa_si_meters.append(eval_rsa_si_meter) # eval_rsa_ri_meters.append(eval_rsa_ri_meter) # eval_topological_sim_meters.append(eval_topological_sim_meter) # eval_posdis_meters.append(eval_posdis_meter) # eval_bosdis_meters.append(eval_bosdis_meter) # eval_language_entropy_meters.append(eval_lang_entropy_meter) (_, noise_acc_meter, _, _, _, _, _, _, _, _, _, _, _, _) = evaluate(model, noise_data, eval_word_counts, noise_metadata, debugging) noise_accuracy_meters.append(noise_acc_meter) print( 'Epoch {}, average train loss: {}, \n average accuracy: {},, average noise accuracy: {} \n' .format(e, losses_meters[e].avg, accuracy_meters[e].avg, noise_accuracy_meters[e].avg)) # if rsa_sampling > 0: # print(' RSA sender-receiver: {}, RSA sender-input: {}, RSA receiver-input: {} \n Topological sim: {} \n'.format( # epoch_rsa_sr_meter.avg, epoch_rsa_si_meter.avg, epoch_rsa_ri_meter.avg, epoch_topological_sim_meter.avg)) # print(' Train posdis: {}, Train posdis: {}, Eval posdis: {}, Eval bosdis: {}'.format( # epoch_bosdis_meter.avg, epoch_bosdis_meter.avg, eval_posdis_meter.avg, eval_bosdis_meter.avg)) # print(' Eval RSA sender-receiver: {}, Eval RSA sender-input: {}, Eval RSA receiver-input: {}\n Eval Topological sim: {}\n'.format( # eval_rsa_sr_meter.avg, eval_rsa_si_meter.avg, eval_rsa_ri_meter.avg, eval_topological_sim_meter.avg)) # wandb.log({ 'average noise accuracy': noise_accuracy_meters[e].avg}, commit=False)
batch_size) dev_ds = tf.data.Dataset.from_tensor_slices(dev).shuffle(2000).batch( batch_size * 2) test_ds = tf.data.Dataset.from_tensor_slices(test).shuffle(2000).batch( batch_size * 2) embedding_pretrained = utils.load_word2vec( 'data/embeddings/wiki_100.utf8', token2idx, embed_dim, 'data/embeddings/embed_mat.npy') model = LSTM_CRF(len(token2idx), embed_dim, maxlen, len(tag2idx), rnn_hiden_size, embedding_pretrained) optimizer = tf.keras.optimizers.Adam(lr=0.003) run.training(model, train_ds, dev_ds, epochs, optimizer) run.evaluate(model, test_ds, data_name="测试集") # # # save model # # print("\nsave model...") # # model.save_weights('model saved/') # # # load model # print("load model...") # model.load_weights('model saved/') # model.summary() run.evaluate(model, test_ds, data_name="测试集", print_score=True, tag_names=list(tag2idx.keys())) print("___" * 30)
def _run_dnn_experiment(conf):
    """Train a DNN with *conf*, plot progress, and print CCR on all splits."""
    print("----------START DNN ON: ", conf['dataset'])
    X_train, Y_train, X_devel, Y_devel, X_test, Y_test = get_data(conf)
    params, train_progress, devel_progress = run.train(conf, X_train, Y_train,
                                                       X_devel, Y_devel)
    plot_progress(train_progress, devel_progress)
    for split, X, Y in (("train", X_train, Y_train),
                        ("development", X_devel, Y_devel),
                        ("test", X_test, Y_test)):
        print("Evaluating {} set".format(split))
        num_correct, num_evaluated = run.evaluate(conf, params, X, Y)
        print("CCR = {0:>5} / {1:>5} = {2:>6.4f}".format(
            num_correct, num_evaluated, num_correct / num_evaluated))
    print("----------END DNN ON: ", conf['dataset'])


def main_exceed():
    """Run the program according to specified configurations.

    One hand-tuned "exceed" run on mnist (Task 1.6b), then the reproduction
    runs on cifar10 and svhn (Task 1.6a). The triplicated train/plot/evaluate
    sequence lives in _run_dnn_experiment.
    """
    # Task 1.6b: Exceed results (mnist)
    conf = config()
    conf['dataset'] = 'mnist'                    # Dataset
    conf['max_steps'] = 5000                     # Training steps
    conf['learning_rate'] = 1.0e-2 * 0.5         # Learning rate
    conf['hidden_dimensions'] = [128, 64, 32]    # Hidden layers & nodes
    conf['batch_size'] = 64                      # Batch size
    conf['activation_function'] = 'sigmoid'      # Hidden activation function
    _run_dnn_experiment(conf)

    # Task 1.6a: Reproduce results (cifar10)
    conf = config()
    conf['dataset'] = 'cifar10'
    conf['max_steps'] = 12000
    conf['learning_rate'] = 1.0e-2 * 4
    conf['hidden_dimensions'] = [256, 128, 64, 32]
    conf['batch_size'] = 32
    conf['activation_function'] = 'tanh'
    _run_dnn_experiment(conf)

    # Task 1.6a: Reproduce results (svhn)
    conf = config()
    conf['dataset'] = 'svhn'
    conf['max_steps'] = 12000
    conf['learning_rate'] = 1.0e-2 * 0.5
    conf['hidden_dimensions'] = [256, 64, 32]
    conf['batch_size'] = 64
    conf['activation_function'] = 'sigmoid'
    _run_dnn_experiment(conf)
def _train_eval_report(conf):
    """Train a DNN with *conf*, plot progress, and print CCR on all splits."""
    print("----------START DNN ON: ", conf['dataset'])
    X_train, Y_train, X_devel, Y_devel, X_test, Y_test = get_data(conf)
    params, train_progress, devel_progress = run.train(conf, X_train, Y_train,
                                                       X_devel, Y_devel)
    plot_progress(train_progress, devel_progress)
    for split, X, Y in (("train", X_train, Y_train),
                        ("development", X_devel, Y_devel),
                        ("test", X_test, Y_test)):
        print("Evaluating {} set".format(split))
        num_correct, num_evaluated = run.evaluate(conf, params, X, Y)
        print("CCR = {0:>5} / {1:>5} = {2:>6.4f}".format(
            num_correct, num_evaluated, num_correct / num_evaluated))
    print("----------END DNN ON: ", conf['dataset'])


def main():
    """Run the program according to specified configurations.

    Task 1.6a: reproduce the DNN results on mnist, cifar10 and svhn.
    cifar10 and svhn use 10000 training steps; mnist keeps the default.
    """
    for dataset, overrides in (('mnist', {}),
                               ('cifar10', {'max_steps': 10000}),
                               ('svhn', {'max_steps': 10000})):
        conf = config()
        conf['dataset'] = dataset
        conf.update(overrides)
        _train_eval_report(conf)
map_location=lambda storage, location: storage) without_cnn_state = {k: v for k, v in state.items() if not 'cnn' in k} model.load_state_dict(without_cnn_state) if use_gpu: model = model.cuda() # Evaluate model on test data test_word_counts = torch.zeros([vocab_size]) if use_gpu: test_word_counts = test_word_counts.cuda() (test_loss_meter, test_acc_meter, test_messages, test_indices, _w_counts, test_entropy_meter, test_distinctness_meter, test_rsa_sr_meter, test_rsa_si_meter, test_rsa_ri_meter, test_topological_sim_meter, _) = evaluate(model, test_data, test_word_counts, target_test_metadata, debugging) print() print('Test accuracy: {}'.format(test_acc_meter.avg)) if should_dump: best_epoch = model_file_name.split('_')[-2] pickle.dump( test_loss_meter, open( '{}/{}_{}_test_losses_meter.p'.format(current_model_dir, dump_id, best_epoch), 'wb')) pickle.dump( test_acc_meter, open(
from run import BATCH_SIZE, EMBEDDING_SIZE, RNNModel, TEXT, USE_CUDA, VOCAB_SIZE, evaluate, device
import torch
import torchtext
import numpy as np

# Rebuild the best language model architecture and restore trained weights.
best_model = RNNModel('LSTM', VOCAB_SIZE, EMBEDDING_SIZE, EMBEDDING_SIZE, 2,
                      dropout=0.5)
if USE_CUDA:
    best_model.cuda()
# map_location makes a GPU-saved checkpoint loadable on a CPU-only host too
# (torch.load without it crashes when CUDA is unavailable).
best_model.load_state_dict(torch.load('lm-best.th', map_location=device))

train, val, test = torchtext.legacy.datasets.LanguageModelingDataset.splits(
    path='../data/',
    train='text8.train.txt',
    validation='text8.dev.txt',
    test='text8.test.txt',
    text_field=TEXT)
train_iter, val_iter, test_iter = torchtext.legacy.data.BPTTIterator.splits(
    (train, val, test),
    batch_size=BATCH_SIZE,
    device=device,
    bptt_len=32,
    repeat=False,
    shuffle=True)

# Perplexity is exp of the average cross-entropy loss returned by evaluate().
val_loss = evaluate(best_model, val_iter)
print("perplexity on val set:", np.exp(val_loss))
test_loss = evaluate(best_model, test_iter)
print("perplexity on test set:", np.exp(test_loss))