def test_cnf_computation(input_shape, latent_dim):
    """Round-trip a continuous normalizing flow and verify inverse/Jacobian consistency."""
    # Fix both RNGs so parameter init and sampled inputs are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)

    x = torch.rand(*input_shape)
    # One latent vector per batch element; skipped entirely when latent_dim is falsy.
    latent = None if not latent_dim else torch.randn(*x.shape[:-1], latent_dim)

    dim = x.shape[-1]
    # The diffeq net sees the latent concatenated onto the state (plus +1 for time).
    in_dim = dim if latent_dim is None else dim + latent_dim
    cnf = nf.ContinuousFlow(
        dim,
        net=nf.net.DiffeqMLP(in_dim + 1, [32], dim),
        atol=1e-8,
        rtol=1e-8,
        divergence='compute',
        solver='dopri5',
        has_latent=latent is not None,
    )
    model = nf.Flow(nf.Normal(torch.zeros(dim), torch.ones(dim)), [cnf])

    z, ldj_fwd = model.forward(x, latent=latent)
    x_rec, ldj_inv = model.inverse(z, latent=latent)

    check_inverse(x, x_rec)
    check_jacobian(ldj_inv, ldj_fwd)
    check_one_training_step(input_shape[-1], model, x, latent)
def test_spline(input_shape, n_bins, lower, upper, spline_type, latent_dim, num_layers):
    """Round-trip a spline flow and verify inverse/Jacobian consistency."""
    # Fix both RNGs so parameter init and sampled inputs are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)

    model = build_spline(input_shape[-1], n_bins, lower, upper, spline_type,
                         latent_dim, num_layers)

    x = torch.rand(*input_shape)
    # Single latent row of shape (1, latent_dim) — presumably broadcast over
    # the batch by the model; TODO confirm against build_spline.
    latent = None if latent_dim is None else torch.randn(1, latent_dim)

    z, ldj_fwd = model.forward(x, latent=latent)
    x_rec, ldj_inv = model.inverse(z, latent=latent)

    check_inverse(x, x_rec)
    check_jacobian(ldj_inv, ldj_fwd)
    # With no layers the model has no parameters to train.
    if num_layers > 0:
        check_one_training_step(input_shape[-1], model, x, latent)
def test_affine(input_shape, num_layers, latent_dim):
    """Round-trip an affine flow and verify inverse/Jacobian consistency."""
    # Fix both RNGs so parameter init and sampled inputs are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)

    model = build_affine(input_shape[-1], num_layers, latent_dim)

    # Note: Gaussian inputs here (other tests use uniform) — affine flows are
    # defined on all of R^d.
    x = torch.randn(*input_shape)
    # One latent vector per batch element; skipped entirely when latent_dim is falsy.
    latent = None if not latent_dim else torch.randn(*x.shape[:-1], latent_dim)

    z, ldj_fwd = model.forward(x, latent=latent)
    x_rec, ldj_inv = model.inverse(z, latent=latent)

    check_inverse(x, x_rec)
    check_jacobian(ldj_inv, ldj_fwd)

    # With no layers the model has no parameters to train.
    if num_layers > 0:
        check_one_training_step(input_shape[-1], model, x, latent)
# Example #4 (score: 0)
def test_coupling_flow(input_shape, hidden_dims, latent_dim, mask, flow_type,
                       num_layers):
    """Round-trip a coupling flow and verify inverse/Jacobian consistency."""
    # Fix both RNGs so parameter init and sampled inputs are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)

    model = build_coupling_flow(input_shape[-1], hidden_dims, latent_dim, mask,
                                flow_type, num_layers)

    x = torch.rand(*input_shape)
    # One latent vector per batch element; skipped entirely when latent_dim is falsy.
    latent = None if not latent_dim else torch.randn(*x.shape[:-1], latent_dim)

    z, ldj_fwd = model.forward(x, latent=latent)
    x_rec, ldj_inv = model.inverse(z, latent=latent)

    check_inverse(x, x_rec)
    check_jacobian(ldj_inv, ldj_fwd)
    # With no layers the model has no parameters to train.
    if num_layers > 0:
        check_one_training_step(input_shape[-1], model, x, latent)
def test_diffeq_self_attention(input_shape):
    """Round-trip a CNF driven by a self-attention diffeq net (no latent)."""
    # Only torch RNG is used here; seed for reproducibility.
    torch.manual_seed(123)

    dim = input_shape[-1]
    # +1 input feature for the time variable fed to the diffeq net.
    cnf = nf.ContinuousFlow(
        dim,
        net=nf.net.DiffeqSelfAttention(dim + 1, [32], dim),
        atol=1e-8,
        rtol=1e-8,
        divergence='compute',
        solver='dopri5',
    )
    model = nf.Flow(nf.Normal(torch.zeros(dim), torch.ones(dim)), [cnf])

    x = torch.rand(*input_shape)

    z, ldj_fwd = model.forward(x)
    x_rec, ldj_inv = model.inverse(z)

    check_inverse(x, x_rec)
    check_jacobian(ldj_inv, ldj_fwd)
    check_one_training_step(input_shape[-1], model, x, None)