Ejemplo n.º 1
0
                time_per_line * nb_lines_ahead))
        dense_logits = prepare_dense_logits(sparse_logits)

        if args.greedy:
            boh = decoder(dense_logits)
        else:
            boh = decoder(dense_logits, args.model_eos)
        one_best = boh.best_hyp()
        decodings[name] = one_best
        confidences[name] = boh.confidence()

        if args.cn_best:
            cn = confusion_networks.produce_cn_from_boh(boh)
            cn_decodings[name] = confusion_networks.best_cn_path(cn)
    reporter.clear()

    save_transcriptions(args.best, decodings)

    with open(args.confidence, 'w') as f:
        for name in decodings:
            f.write('{} {:.3f}\n'.format(name, confidences[name]))

    if args.cn_best:
        save_transcriptions(args.cn_best, cn_decodings)


if __name__ == "__main__":
    # Script entry point: parse CLI options, claim a GPU, then run the pipeline.
    args = parse_arguments()
    # NOTE(review): gpu_owner is never referenced afterwards — presumably
    # GPUOwner() reserves/locks a GPU for the process lifetime as a side
    # effect of construction (RAII-style holder); confirm against its definition.
    gpu_owner = GPUOwner()
    main(args)
Ejemplo n.º 2
0
    parser.add_argument('--seed', type=int, default=1111,
                        help='random seed')
    parser.add_argument('--cuda', action='store_true',
                        help='use CUDA')
    parser.add_argument('--load', type=str, required=True,
                        help='where to load a model from')
    args = parser.parse_args()
    print(args)

    init_seeds(args.seed, args.cuda)

    print("loading model...")
    device = torch.device('cuda') if args.cuda else torch.device('cpu')
    if args.cuda:
        gpu_owner = GPUOwner(lambda: torch.zeros((1), device='cuda'))

    lm = torch.load(args.load, map_location=device)
    print(lm)

    evaluator = SubstitutionalEnblockEvaluator_v2(
        lm,
        args.data,
        args.batch_size,
        args.target_seq_len,
        lambda streams: Corruptor(streams, args.subs_rate, len(lm.vocab), args.del_rate, args.ins_rate, protected=[lm.vocab['</s>']]),
        args.rounds,
    )
    eval_report = evaluator.evaluate(report_individual=args.individual)

    print('total loss {:.1f} | per token loss {:5.2f} | ppl {:8.2f}'.format(eval_report.total_loss, eval_report.loss_per_token, math.exp(eval_report.loss_per_token)))