Example 1
def test_reshape_means():
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        means = np.random.rand(T, static_dim * len(windows))
        reshaped_means = G.reshape_means(means, static_dim)
        assert reshaped_means.shape == (T * len(windows), static_dim)
        reshaped_means2 = G.reshape_means(reshaped_means, static_dim)
        # Applying reshape_means to already-reshaped means should be a no-op
        assert np.allclose(reshaped_means, reshaped_means2)
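All examples iterate over window sets produced by the test helper `_get_windows_set`, which is not shown here. A plausible minimal reconstruction, assuming nnmnkwii-style `(left_width, right_width, coefficients)` window tuples for static, delta, and delta-delta features:

def _get_windows_set():
    # Hypothetical reconstruction of the test helper: each entry is a list of
    # (left_width, right_width, coefficients) window tuples.
    windows_set = [
        # static only
        [(0, 0, np.array([1.0]))],
        # static + delta
        [(0, 0, np.array([1.0])),
         (1, 1, np.array([-0.5, 0.0, 0.5]))],
        # static + delta + delta-delta
        [(0, 0, np.array([1.0])),
         (1, 1, np.array([-0.5, 0.0, 0.5])),
         (1, 1, np.array([1.0, -2.0, 1.0]))],
    ]
    return windows_set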
Example 2
def test_unit_variance_mlpg():
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        means = np.random.rand(T, static_dim * len(windows))
        variances = np.ones(static_dim * len(windows))
        y = G.mlpg(means, variances, windows)

        R = G.unit_variance_mlpg_matrix(windows, T)
        y_hat = R.dot(G.reshape_means(means, static_dim))
        assert np.allclose(y_hat, y)
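With unit variances, MLPG reduces to a single linear map y = R · mu_reshaped, where R = (WᵀW)⁻¹Wᵀ and W stacks the window matrices. The sketch below illustrates that relationship in plain NumPy under the window format assumed above; it is not the library's implementation (which uses banded matrices), and it assumes the reshaped means are ordered window-major:

def naive_unit_variance_mlpg_matrix(windows, T):
    # Build the stacked window matrix W of shape (T * num_windows, T),
    # one T x T block per window, rows ordered window-major.
    W = np.zeros((T * len(windows), T))
    for w, (left, right, coeffs) in enumerate(windows):
        for t in range(T):
            for k, c in zip(range(-left, right + 1), coeffs):
                if 0 <= t + k < T:
                    W[w * T + t, t + k] = c
    # Unit-variance MLPG: solve (W^T W) y = W^T mu, i.e. R = (W^T W)^{-1} W^T.
    return np.linalg.solve(W.T.dot(W), W.T)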
Example 3
def test_unit_variance_mlpg_gradcheck():
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        # Means, input for MLPG
        means = Variable(torch.rand(T, static_dim * len(windows)),
                         requires_grad=True)

        # Input for UnitVarianceMLPG
        reshaped_means = G.reshape_means(
            means.data.clone().numpy(), static_dim)
        reshaped_means = Variable(torch.from_numpy(reshaped_means),
                                  requires_grad=True)

        # Compute MLPG matrix
        R = G.unit_variance_mlpg_matrix(windows, T).astype(np.float32)
        R = torch.from_numpy(R)

        # UnitVarianceMLPG accepts either means or reshaped_means as input
        y1 = UnitVarianceMLPG(R)(means)
        y2 = UnitVarianceMLPG(R)(reshaped_means)

        # Unit variances
        variances = torch.ones(static_dim * len(windows)
                               ).expand(T, static_dim * len(windows))
        y_hat = MLPG(variances, windows)(means)

        # Make sure UnitVarianceMLPG and MLPG give the same result
        # when unit variances are used
        for y in [y1, y2]:
            assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        # Grad check
        inputs = (reshaped_means,)
        assert gradcheck(UnitVarianceMLPG(R),
                         inputs, eps=1e-3, atol=1e-3)

        inputs = (means,)
        assert gradcheck(UnitVarianceMLPG(R),
                         inputs, eps=1e-3, atol=1e-3)
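The eps/atol values above are relaxed because the inputs are single precision. A stricter standalone check in double precision (a sketch, assuming UnitVarianceMLPG also accepts DoubleTensor input) might look like:

# Hypothetical stricter gradcheck in float64, using the same shapes as above.
windows = _get_windows_set()[-1]
R64 = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, 10))  # float64
means64 = Variable(torch.rand(10, 2 * len(windows)).double(), requires_grad=True)
assert gradcheck(UnitVarianceMLPG(R64), (means64,), eps=1e-6, atol=1e-4)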
Example 4
def test_minibatch_unit_variance_mlpg_gradcheck():
    static_dim = 2
    T = 5

    for windows in _get_windows_set():
        batch_size = 5
        torch.manual_seed(1234)

        # Prepare inputs
        means = torch.rand(T, static_dim * len(windows))
        means_expanded = means.expand(
            batch_size, means.shape[0], means.shape[1])
        reshaped_means = torch.from_numpy(
            G.reshape_means(means.numpy(), static_dim))
        reshaped_means_expanded = reshaped_means.expand(
            batch_size, reshaped_means.shape[0], reshaped_means.shape[1])

        # Target
        y = G.mlpg(means.numpy(), np.ones(static_dim * len(windows)), windows)
        y = Variable(torch.from_numpy(y), requires_grad=False)
        y_expanded = y.expand(batch_size, y.size(0), y.size(1))

        # Pack into variables
        means = Variable(means, requires_grad=True)
        means_expanded = Variable(means_expanded, requires_grad=True)
        reshaped_means = Variable(reshaped_means, requires_grad=True)
        reshaped_means_expanded = Variable(
            reshaped_means_expanded, requires_grad=True)

        # Case 1: 2d with reshaped means
        R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
        y_hat1 = AF.unit_variance_mlpg(R, reshaped_means)

        # Case 2: 3d with reshaped means
        y_hat2 = AF.unit_variance_mlpg(R, reshaped_means_expanded)
        for i in range(batch_size):
            assert np.allclose(y_hat1.data.numpy(), y_hat2[i].data.numpy())

        nn.MSELoss()(y_hat1, y).backward()
        nn.MSELoss()(y_hat2, y_expanded).backward()

        # Check grad consistency
        for i in range(batch_size):
            grad1 = reshaped_means.grad.data.numpy()
            grad2 = reshaped_means_expanded.grad[i].data.numpy()
            assert np.allclose(grad1, grad2)

        # Case 3: 2d with non-reshaped input
        y_hat3 = AF.unit_variance_mlpg(R, means)

        # Case 4: 3d with non-reshaped input
        y_hat4 = AF.unit_variance_mlpg(R, means_expanded)

        for i in range(batch_size):
            assert np.allclose(y_hat1.data.numpy(), y_hat3.data.numpy())
            assert np.allclose(y_hat3.data.numpy(), y_hat4[i].data.numpy())

        nn.MSELoss()(y_hat3, y).backward()
        nn.MSELoss()(y_hat4, y_expanded).backward()

        # Check grad consistency
        for i in range(batch_size):
            grad1 = means.grad.data.numpy()
            grad2 = means_expanded.grad[i].data.numpy()
            assert np.allclose(grad1, grad2)
Example 5
def benchmark_mlpg(static_dim=59, T=100, batch_size=10, use_cuda=True):
    if use_cuda and not torch.cuda.is_available():
        return

    windows = _get_windows_set()[-1]
    np.random.seed(1234)
    torch.manual_seed(1234)
    means = np.random.rand(T, static_dim * len(windows)).astype(np.float32)
    variances = np.ones(static_dim * len(windows))
    reshaped_means = G.reshape_means(means, static_dim)

    # Pseudo target
    y = G.mlpg(means, variances, windows).astype(np.float32)

    # Pack into variables
    means = Variable(torch.from_numpy(means), requires_grad=True)
    reshaped_means = Variable(torch.from_numpy(reshaped_means),
                              requires_grad=True)
    y = Variable(torch.from_numpy(y), requires_grad=False)
    criterion = nn.MSELoss()

    # Case 1: MLPG
    since = time.time()
    for _ in range(batch_size):
        y_hat = AF.mlpg(means, torch.from_numpy(variances), windows)
        L = criterion(y_hat, y)
        assert np.allclose(y_hat.data.numpy(), y.data.numpy())
        L.backward()  # slow!
    elapsed_mlpg = time.time() - since

    # Case 2: UnitVarianceMLPG
    since = time.time()
    if use_cuda:
        y = y.cuda()
    R = G.unit_variance_mlpg_matrix(windows, T)
    R = torch.from_numpy(R)
    # Assuming minibatches are zero-padded, we only need to create the MLPG
    # matrix per-minibatch, not per-utterance.
    if use_cuda:
        R = R.cuda()
    for _ in range(batch_size):
        if use_cuda:
            means = means.cpu()
            means = means.cuda()

        y_hat = AF.unit_variance_mlpg(R, means)
        L = criterion(y_hat, y)
        assert np.allclose(y_hat.cpu().data.numpy(),
                           y.cpu().data.numpy(),
                           atol=1e-5)
        L.backward()
    elapsed_unit_variance_mlpg = time.time() - since

    ratio = elapsed_mlpg / elapsed_unit_variance_mlpg

    print(
        "MLPG vs UnitVarianceMLPG (static_dim, T, batch_size, use_cuda) = {}:"
        .format((static_dim, T, batch_size, use_cuda)))
    if ratio > 1:
        s = "faster"
        sys.stdout.write(OKGREEN)
    else:
        s = "slower"
        sys.stdout.write(FAIL)
    print(
        "UnitVarianceMLPG is {:.4f} times {}. Elapsed times: {:.4f} / {:.4f}".format(
            ratio, s, elapsed_mlpg, elapsed_unit_variance_mlpg))

    print(ENDC)
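A minimal driver for the benchmark above (a sketch; OKGREEN, FAIL, and ENDC are ANSI color codes assumed to be defined elsewhere in the module and re-declared here so the snippet runs on its own):

if __name__ == "__main__":
    # ANSI color codes used by benchmark_mlpg for the summary line.
    OKGREEN, FAIL, ENDC = "\033[92m", "\033[91m", "\033[0m"
    for use_cuda in [False, True]:
        benchmark_mlpg(static_dim=59, T=100, batch_size=10, use_cuda=use_cuda)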