def test_all():
    """Evaluate the model on every subset of the test split.

    Iterates over the module-level ``data['test']`` mapping, builds a
    non-shuffled ``Data.BucketIterator`` for each subset, runs
    ``evaluate`` on it, and prints per-subset loss and accuracy
    separated by banner lines.
    """
    banner = '=' * 89
    for subset_name in data['test']:
        subset_iter = Data.BucketIterator(data['test'][subset_name],
                                          args.batch_size,
                                          args.cuda,
                                          args,
                                          shuffle=False)
        print(banner)
        loss, acc, total_correctness = evaluate(subset_iter)
        print('| subset %s | test loss %5.3f | test acc %8.3f' %
              (subset_name, loss, acc))
    # Closing banner after the last subset's report.
    print(banner)
        )
    else:
        torch.cuda.manual_seed(args.seed)

###############################################################################
# Load data
###############################################################################

# Load the pre-processed dataset bundle from disk. The file is expected to
# be a dict with keys "vocab" and "data" (the latter holding the
# 'train'/'valid'/'test' splits) — presumably produced by a companion
# preprocessing script; verify against that script.
dataset = torch.load(args.data)
vocab = dataset["vocab"]
data = dataset["data"]
# Vocabulary size; likely used later to size the embedding matrix
# (see gen_embeddings call below).
ntokens = vocab.size()
if not args.test_only:
    # Training iterator is shuffled; infor_weighting presumably enables
    # some form of per-example weighting — TODO confirm semantics in
    # Data.BucketIterator.
    train_data = Data.BucketIterator(data['train'],
                                     args.batch_size,
                                     args.cuda,
                                     args,
                                     shuffle=True,
                                     infor_weighting=args.infor_weighting)
    # Validation iterator: same batching, deterministic order.
    valid_data = Data.BucketIterator(data['valid'],
                                     args.batch_size,
                                     args.cuda,
                                     args,
                                     shuffle=False)
# The "whole" test subset is always prepared, even in test_only mode
# (per-subset iterators are built separately inside test_all()).
test_data = Data.BucketIterator(data['test']["whole"],
                                args.batch_size,
                                args.cuda,
                                args,
                                shuffle=False)
if args.pre_trained is not None or args.embedding_file == "None":
    embeddings = gen_embeddings(vocab, ntokens, args.emsize)
else: