Example #1
def test_mlpg_variance_expand():
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = torch.rand(T, static_dim * len(windows), requires_grad=True)
        variances = torch.rand(static_dim * len(windows))
        variances_expanded = variances.expand(T, static_dim * len(windows))
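        # Expanding the per-dimension variances across all T frames must not
        # change the MLPG solution; both calls should yield the same trajectory.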
        y = AF.mlpg(means, variances, windows)
        y_hat = AF.mlpg(means, variances_expanded, windows)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())
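
These examples share a test harness that the page does not show. Below is a minimal sketch of the assumed setup, following the conventions of the nnmnkwii test suite: AF is nnmnkwii.autograd, G is nnmnkwii.paramgen, and _get_windows_set is a hypothetical reconstruction of the helper returning static, static+delta, and static+delta+delta-delta window sets. Treat all of it as an assumption, not the verbatim harness.

# Assumed shared setup for the examples on this page (a sketch, not the
# verbatim harness). OKGREEN/FAIL/ENDC are assumed ANSI color constants
# used by benchmark_mlpg below.
import sys
import time

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable

from nnmnkwii import autograd as AF
from nnmnkwii import paramgen as G

OKGREEN = "\033[92m"
FAIL = "\033[91m"
ENDC = "\033[0m"


def _get_windows_set():
    # Window sets: static only; static + delta; static + delta + delta-delta.
    # Each window is a (left_context, right_context, coefficients) tuple.
    return [
        [(0, 0, np.array([1.0]))],
        [(0, 0, np.array([1.0])),
         (1, 1, np.array([-0.5, 0.0, 0.5]))],
        [(0, 0, np.array([1.0])),
         (1, 1, np.array([-0.5, 0.0, 0.5])),
         (1, 1, np.array([1.0, -2.0, 1.0]))],
    ]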
Example #2
def test_functional_mlpg():
    static_dim = 2
    T = 5

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = torch.rand(T, static_dim * len(windows))
        variances = torch.ones(static_dim * len(windows))

        y = G.mlpg(means.numpy(), variances.numpy(), windows)
        y = Variable(torch.from_numpy(y), requires_grad=False)

        means = Variable(means, requires_grad=True)

        # mlpg: the autograd version should match the NumPy reference above
        y_hat = AF.mlpg(means, variances, windows)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        # Test backward pass
        nn.MSELoss()(y_hat, y).backward()

        # unit_variance_mlpg: same trajectory via a precomputed MLPG matrix R
        R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
        y_hat = AF.unit_variance_mlpg(R, means)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        nn.MSELoss()(y_hat, y).backward()

        # Test 3D tensor inputs
        y_hat = AF.unit_variance_mlpg(R, means.view(1, -1, means.size(-1)))
        assert np.allclose(
            y.data.numpy(), y_hat.data.view(-1, static_dim).numpy())

        nn.MSELoss()(y_hat.view(-1, static_dim), y).backward()
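
For reference, with unit variances MLPG collapses to a single matrix product. Below is a standalone NumPy sketch of that identity, assuming the setup block from Example #1 and assuming that G.reshape_means stacks the per-window means into shape (T*num_windows, static_dim), as in the nnmnkwii docs:

# NumPy-only check (a sketch): with unit variances, the MLPG solution is
#   y = R @ reshape_means(means),  R = unit_variance_mlpg_matrix(windows, T)
static_dim, T = 2, 5
windows = _get_windows_set()[-1]  # static + delta + delta-delta
np.random.seed(1234)
means_np = np.random.rand(T, static_dim * len(windows)).astype(np.float32)
variances_np = np.ones(static_dim * len(windows))

y_ref = G.mlpg(means_np, variances_np, windows)  # reference, (T, static_dim)
R = G.unit_variance_mlpg_matrix(windows, T)      # (T, T * num_windows)
u = G.reshape_means(means_np, static_dim)        # (T * num_windows, static_dim)
assert np.allclose(y_ref, R.dot(u), atol=1e-5)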
Example #3
def benchmark_mlpg(static_dim=59, T=100, batch_size=10, use_cuda=True):
    if use_cuda and not torch.cuda.is_available():
        return

    windows = _get_windows_set()[-1]
    np.random.seed(1234)
    torch.manual_seed(1234)
    means = np.random.rand(T, static_dim * len(windows)).astype(np.float32)
    variances = np.ones(static_dim * len(windows))
    reshaped_means = G.reshape_means(means, static_dim)

    # Pseudo target for the regression loss
    y = G.mlpg(means, variances, windows).astype(np.float32)

    # Pack into variables
    means = Variable(torch.from_numpy(means), requires_grad=True)
    reshaped_means = Variable(torch.from_numpy(reshaped_means),
                              requires_grad=True)
    y = Variable(torch.from_numpy(y), requires_grad=False)
    criterion = nn.MSELoss()

    # Case 1: MLPG
    since = time.time()
    for _ in range(batch_size):
        y_hat = AF.mlpg(means, torch.from_numpy(variances), windows)
        L = criterion(y_hat, y)
        assert np.allclose(y_hat.data.numpy(), y.data.numpy())
        L.backward()  # slow!
    elapsed_mlpg = time.time() - since

    # Case 2: UnitVarianceMLPG
    since = time.time()
    if use_cuda:
        y = y.cuda()
    R = G.unit_variance_mlpg_matrix(windows, T)
    R = torch.from_numpy(R)
    # Assuming minibatches are zero-padded, the MLPG matrix only needs to be
    # built once per minibatch, not once per utterance.
    if use_cuda:
        R = R.cuda()
    for _ in range(batch_size):
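        # Round-trip means through the CPU so each iteration also includes
        # the host-to-device transfer in the timing.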
        if use_cuda:
            means = means.cpu()
            means = means.cuda()

        y_hat = AF.unit_variance_mlpg(R, means)
        L = criterion(y_hat, y)
        assert np.allclose(y_hat.cpu().data.numpy(),
                           y.cpu().data.numpy(),
                           atol=1e-5)
        L.backward()
    elapsed_unit_variance_mlpg = time.time() - since

    ratio = elapsed_mlpg / elapsed_unit_variance_mlpg

    print(
        "MLPG vs UnitVarianceMLPG (static_dim, T, batch_size, use_cuda) = {}:"
        .format((static_dim, T, batch_size, use_cuda)))
    if ratio > 1:
        s = "faster"
        sys.stdout.write(OKGREEN)
    else:
        s = "slower"
        sys.stdout.write(FAIL)
    print(
        "UnitVarianceMLPG is {:.4f} times {}. Elapsed times: {:.4f} / {:.4f}".format(
            ratio, s, elapsed_mlpg, elapsed_unit_variance_mlpg))

    print(ENDC)
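
A possible invocation (a sketch; with use_cuda=True the function returns silently when no CUDA device is available):

if __name__ == "__main__":
    # Compare plain MLPG against UnitVarianceMLPG, first on CPU, then on GPU.
    benchmark_mlpg(static_dim=59, T=100, batch_size=10, use_cuda=False)
    benchmark_mlpg(static_dim=59, T=100, batch_size=10, use_cuda=True)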