Example #1
        writer.add_histogram('Prior', prior, epoch)

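        # Evaluation loop: xs/xn hold the acoustic features and their frame lengths, ys/yn the target label sequences and their lengths.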
        for xs, ys, xn, yn in test:

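            # Forward pass: zs are the joint-network log-probabilities; the returned xs/xn feed the greedy decoder below.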
            zs, xs, xn = model(xs, ys.t(), xn, yn)

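            # Transducer (RNN-T) loss, averaged over the batch.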
            loss1 = rnnt_loss(zs, ys, xn, yn, average_frames=False, reduction="mean")

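            # Entropy of the output distribution (zs are log-probabilities), tracked as a diagnostic.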
            loss2 = -(zs.exp() * zs).sum(dim=-1).mean()

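            # Greedy decoding of the encoder outputs, using the prior logged above.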
            xs = model.greedy_decode(xs, prior)

            err.update(loss1.item())
            ent.update(loss2.item())

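            # Strip blank symbols from the decoded sequences in place before scoring.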
            remove_blank(xs, xn, blank)

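            # Accumulate character and word error rates for this batch.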
            cer.update(xs, ys, xn, yn)
            wer.update(xs, ys, xn, yn)

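            # test is assumed to be a tqdm-wrapped loader; update its progress-bar text with the running metrics.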
            test.set_description('Epoch %d %s %s %s %s' % (epoch, err, ent, cer, wer))

    sys.stderr.write('\n')

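    # Write epoch-level summaries of each metric to the TensorBoard writer.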
    err.summary(writer, epoch)
    ent.summary(writer, epoch)
    cer.summary(writer, epoch)
    wer.summary(writer, epoch)

    writer.flush()
Example #2
        step += 1

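        # Switch to eval mode and sample hypotheses without tracking gradients.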
        model.eval()

        with torch.no_grad():

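            # Run the acoustic encoder once; hs/hn are the encoder outputs and their lengths.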
            hs, hn = model.forward_acoustic(xs, xn)

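            # Replicate encoder outputs and references K times so K hypotheses can be drawn per utterance.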
            hs_k = hs.repeat(K, 1, 1)
            hn_k = hn.repeat(K)
            ys_k = ys.repeat(K, 1)
            yn_k = yn.repeat(K)

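            # Decode with sampled=True to draw stochastic hypotheses instead of the argmax path.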
            hs_k = model.greedy_decode(hs_k, prior, sampled=True)

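            # Remove blank symbols in place before computing the error rate.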
            remove_blank(hs_k, hn_k, blank)

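            # Word error rate of each sampled hypothesis against its (replicated) reference.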
            WER = compute_wer(hs_k, ys_k, hn_k, yn_k, blank, space)

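            # Heuristic symbol accuracy derived from WER, scaled by the ratio of reference length (yn_k) to hypothesis length (hn_k, clamped to at least 1).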
            SymAcc = 1 - 0.5 * WER * (1 + yn_k.float() / hn_k.clamp_min(1).float())

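            # relu/elu are assumed to come from torch.nn.functional. Clamp negative accuracies to zero and arrange rewards as (K, batch) on the GPU.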
            rewards = relu(SymAcc).reshape(K, -1).cuda()

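            # Scalar mean reward, presumably kept for logging outside this snippet.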
            rewards_mean = rewards.mean().item()

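            # Subtract the per-utterance mean over the K samples as a baseline to reduce variance.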
            rewards -= rewards.mean(dim=0)

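            # Soften the negative advantages in place with ELU (alpha=gamma).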
            elu(rewards, alpha=gamma, inplace=True)

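            # Reshape hypotheses and lengths back to (K, batch, ...) for the subsequent update.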
            hs_k = hs_k.reshape(K, len(xs), -1)
            hn_k = hn_k.reshape(K, len(xs))