def run(arguments) -> None:
    """Evaluate a previously trained language model on a held-out test set.

    Restores the model from arguments['TRAINED_MODEL'], loads test data from
    arguments['TEST_DATA_DIR'], runs a single non-training epoch, and prints
    the resulting loss and accuracy.
    """
    print("Loading data ...")
    model = LanguageModel.restore(arguments["TRAINED_MODEL"])
    print(f"  Loaded trained model from {arguments['TRAINED_MODEL']}.")

    # Sequence length and batch size come from the restored model's own
    # hyperparameters so evaluation matches the training configuration.
    max_len = model.hyperparameters["max_seq_length"]
    test_data = load_data_from_dir(
        model.vocab,
        length=max_len,
        data_dir=arguments["TEST_DATA_DIR"],
        max_num_files=arguments.get("--max-num-files"),
    )
    print(
        f"  Loaded {test_data.shape[0]} test samples from {arguments['TEST_DATA_DIR']}."
    )

    # drop_remainder=False so every test sample contributes to the metrics.
    batch_iterator = get_minibatch_iterator(
        test_data,
        model.hyperparameters["batch_size"],
        is_training=False,
        drop_remainder=False,
    )
    test_loss, test_acc = model.run_one_epoch(batch_iterator, training=False)
    print(f"Test:  Loss {test_loss:.4f}, Acc {test_acc:.3f}")
def run(arguments) -> None:
    """Greedily extend a token sequence using a trained language model.

    Starting from arguments['TOKENS'], repeatedly predicts the next token for
    up to int(arguments['--num-steps']) steps, printing the top candidates at
    each step. Stops early if the most likely token is END_SYMBOL. The input
    token list is extended in place.

    NOTE(review): this `run` shares a name with the evaluation `run` defined
    earlier in this file — presumably the two live in separate scripts; if
    they are truly in one module, the later definition shadows the earlier.
    """
    # Local import: `Tuple` may not be in the file-level imports we can see.
    from typing import List, Tuple

    model = LanguageModel.restore(arguments["TRAINED_MODEL"])

    def compute_next_token(token_seq: List[str],
                           num_cands: int = 3) -> List[Tuple[str, float]]:
        """Return the num_cands most probable next tokens as (token, prob) pairs.

        Fix: the original annotated the return type as `str`, but the function
        returns a list of (token, probability) tuples, as the caller's
        `for (token, prob) in cands` loop relies on.
        """
        tensorised_seq = tensorise_token_sequence(model.vocab, len(token_seq) + 1, token_seq)
        next_tok_probs = model.predict_next_token(tensorised_seq)
        # argsort of the negated probabilities yields indices in
        # descending-probability order; take the top num_cands.
        top_idxs = (-next_tok_probs).argsort()[:num_cands]
        return [(model.vocab.get_name_for_id(top_idx),
                 next_tok_probs[top_idx])
                for top_idx in top_idxs]

    tokens = arguments['TOKENS']
    for idx in range(int(arguments['--num-steps'])):
        cands = compute_next_token(tokens)
        print("Prediction at step %i (tokens %s):" % (idx, tokens))
        for (token, prob) in cands:
            print(" Prob %.3f: %s" % (prob, token))
        # Greedy decoding: always continue with the single most likely token.
        next_tok = cands[0][0]
        if next_tok == END_SYMBOL:
            print('Reached end of sequence. Stopping.')
            break
        print("Continuing with token %s" % next_tok)
        tokens.append(next_tok)