Example no. 1
def build_maf(
        batch_x: Tensor = None,
        batch_y: Tensor = None,
        z_score_x: bool = True,
        z_score_y: bool = True,
        hidden_features: int = 50,
        num_transforms: int = 5,
        embedding_net: nn.Module = nn.Identity(),
) -> nn.Module:
    """Builds MAF p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network.
        z_score_y: Whether to z-score ys passing into the network.
        hidden_features: Number of hidden features.
        num_transforms: Number of transforms.
        embedding_net: Optional embedding network for y.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    y_numel = batch_y[0].numel()

    if x_numel == 1:
        warn(
            "In one-dimensional output space, this flow is limited to Gaussians"
        )

    transform = transforms.CompositeTransform([
        transforms.CompositeTransform([
            transforms.MaskedAffineAutoregressiveTransform(
                features=x_numel,
                hidden_features=hidden_features,
                context_features=y_numel,
                num_blocks=2,
                use_residual_blocks=False,
                random_mask=False,
                activation=tanh,
                dropout_probability=0.0,
                use_batch_norm=True,
            ),
            transforms.RandomPermutation(features=x_numel),
        ]) for _ in range(num_transforms)
    ])

    if z_score_x:
        transform_zx = standardizing_transform(batch_x)
        transform = transforms.CompositeTransform([transform_zx, transform])

    if z_score_y:
        embedding_net = nn.Sequential(standardizing_net(batch_y),
                                      embedding_net)

    distribution = distributions_.StandardNormal((x_numel, ))
    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
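
A minimal usage sketch, not part of the original snippet: the batches only fix the dimensionality of x and y (and the z-scoring statistics), and the returned flow is scored via nflows' log_prob. torch and the build_maf above are assumed importable.

import torch

batch_x, batch_y = torch.randn(100, 3), torch.randn(100, 2)
density_estimator = build_maf(batch_x=batch_x, batch_y=batch_y)
# Log-probability of 10 new xs, each conditioned on its own y.
log_prob = density_estimator.log_prob(torch.randn(10, 3), context=torch.randn(10, 2))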
Example no. 2
def build_made(
        batch_x: Tensor = None,
        batch_y: Tensor = None,
        z_score_x: bool = True,
        z_score_y: bool = True,
        hidden_features: int = 50,
        num_blocks: int = 5,
        num_mixture_components: int = 10,
        embedding_net: nn.Module = nn.Identity(),
) -> nn.Module:
    """Builds MADE p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network.
        z_score_y: Whether to z-score ys passing into the network.
        hidden_features: Number of hidden features.
        num_blocks: Number of MADE blocks.
        num_mixture_components: Number of mixture components.
        embedding_net: Optional embedding network for y.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    y_numel = batch_y[0].numel()

    if x_numel == 1:
        warn(
            "In one-dimensional output space, this flow is limited to Gaussians"
        )

    transform = transforms.IdentityTransform()

    if z_score_x:
        transform_zx = standardizing_transform(batch_x)
        transform = transforms.CompositeTransform([transform_zx, transform])

    if z_score_y:
        embedding_net = nn.Sequential(standardizing_net(batch_y),
                                      embedding_net)

    distribution = distributions_.MADEMoG(
        features=x_numel,
        hidden_features=hidden_features,
        context_features=y_numel,
        num_blocks=num_blocks,
        num_mixture_components=num_mixture_components,
        use_residual_blocks=True,
        random_mask=False,
        activation=relu,
        dropout_probability=0.0,
        use_batch_norm=False,
        custom_initialization=True,
    )

    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
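
A hedged sketch of conditional sampling, not from the original snippet; it assumes the MADEMoG backend supports sampling with context, in which case Flow.sample with a batch-one context returns shape (1, num_samples, x_dim).

import torch

net = build_made(batch_x=torch.randn(100, 3), batch_y=torch.randn(100, 2))
# Draw 5 xs conditioned on a single observation y.
samples = net.sample(5, context=torch.randn(1, 2))  # expected shape: (1, 5, 3)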
Example no. 3
def build_mdn(batch_x: Tensor = None,
              batch_y: Tensor = None,
              z_score_x: bool = True,
              z_score_y: bool = True,
              hidden_features: int = 50,
              num_components: int = 10,
              embedding_net: nn.Module = nn.Identity(),
              **kwargs) -> nn.Module:
    """Builds MDN p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network.
        z_score_y: Whether to z-score ys passing into the network.
        hidden_features: Number of hidden features.
        num_components: Number of components.
        embedding_net: Optional embedding network for y.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for MDNs and are therefore ignored.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    # Infer the output dimensionality of the embedding_net by making a forward pass.
    y_numel = embedding_net(batch_y[:1]).numel()

    transform = transforms.IdentityTransform()

    if z_score_x:
        transform_zx = standardizing_transform(batch_x)
        transform = transforms.CompositeTransform([transform_zx, transform])

    if z_score_y:
        embedding_net = nn.Sequential(standardizing_net(batch_y),
                                      embedding_net)

    distribution = MultivariateGaussianMDN(
        features=x_numel,
        context_features=y_numel,
        hidden_features=hidden_features,
        hidden_net=nn.Sequential(
            nn.Linear(y_numel, hidden_features),
            nn.ReLU(),
            nn.Dropout(p=0.0),
            nn.Linear(hidden_features, hidden_features),
            nn.ReLU(),
            nn.Linear(hidden_features, hidden_features),
            nn.ReLU(),
        ),
        num_components=num_components,
        custom_initialization=True,
    )

    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
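
A short sketch, with a hypothetical nn.Linear embedding, of why y_numel is inferred by a forward pass: the MDN conditions on the embedded y (here 2 features), not the raw 10-dimensional y.

import torch
from torch import nn

embedding = nn.Linear(10, 2)  # hypothetical: compress 10 raw y-features to 2
net = build_mdn(batch_x=torch.randn(100, 3), batch_y=torch.randn(100, 10),
                embedding_net=embedding)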
Example no. 4
def build_nsf(
        batch_x: Tensor = None,
        batch_y: Tensor = None,
        z_score_x: bool = True,
        z_score_y: bool = True,
        hidden_features: int = 50,
        num_transforms: int = 5,
        embedding_net: nn.Module = nn.Identity(),
        **kwargs,
) -> nn.Module:
    """Builds NSF p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network.
        z_score_y: Whether to z-score ys passing into the network.
        hidden_features: Number of hidden features.
        num_transforms: Number of transforms.
        embedding_net: Optional embedding network for y.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for nsf and are therefore ignored.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    # Infer the output dimensionality of the embedding_net by making a forward pass.
    y_numel = embedding_net(batch_y[:1]).numel()

    if x_numel == 1:
        raise NotImplementedError

    transform = transforms.CompositeTransform([
        transforms.CompositeTransform([
            transforms.PiecewiseRationalQuadraticCouplingTransform(
                mask=create_alternating_binary_mask(features=x_numel,
                                                    even=(i % 2 == 0)),
                transform_net_create_fn=lambda in_features, out_features:
                nets.ResidualNet(
                    in_features=in_features,
                    out_features=out_features,
                    hidden_features=hidden_features,
                    context_features=y_numel,
                    num_blocks=2,
                    activation=relu,
                    dropout_probability=0.0,
                    use_batch_norm=False,
                ),
                num_bins=10,
                tails="linear",
                tail_bound=3.0,
                apply_unconditional_transform=False,
            ),
            transforms.LULinear(x_numel, identity_init=True),
        ]) for i in range(num_transforms)
    ])

    if z_score_x:
        transform_zx = standardizing_transform(batch_x)
        transform = transforms.CompositeTransform([transform_zx, transform])

    if z_score_y:
        embedding_net = nn.Sequential(standardizing_net(batch_y),
                                      embedding_net)

    distribution = distributions_.StandardNormal((x_numel, ))
    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
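
Because this variant raises NotImplementedError for scalar x, a minimal sketch (an assumption, not part of the snippet) needs at least two x-dimensions:

import torch

net = build_nsf(batch_x=torch.randn(100, 2), batch_y=torch.randn(100, 3))
log_prob = net.log_prob(torch.randn(10, 2), context=torch.randn(10, 3))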
Example no. 5
File: flow.py Project: bkmi/sbi
def build_maf(
    batch_x: Tensor,
    batch_y: Tensor,
    z_score_x: Optional[str] = "independent",
    z_score_y: Optional[str] = "independent",
    hidden_features: int = 50,
    num_transforms: int = 5,
    embedding_net: nn.Module = nn.Identity(),
    num_blocks: int = 2,
    dropout_probability: float = 0.0,
    use_batch_norm: bool = False,
    **kwargs,
) -> nn.Module:
    """Builds MAF p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network, can be one of:
            - `none`, or None: do not z-score.
            - `independent`: z-score each dimension independently.
            - `structured`: treat dimensions as related, therefore compute mean and std
            over the entire batch, instead of per-dimension. Should be used when each
            sample is, for example, a time series or an image.
        z_score_y: Whether to z-score ys passing into the network, same options as
            z_score_x.
        hidden_features: Number of hidden features.
        num_transforms: Number of transforms.
        embedding_net: Optional embedding network for y.
        num_blocks: number of blocks used for residual net for context embedding.
        dropout_probability: dropout probability for regularization in residual net.
        use_batch_norm: whether to use batch norm in residual net.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for maf and are therefore ignored.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    # Infer the output dimensionality of the embedding_net by making a forward pass.
    check_data_device(batch_x, batch_y)
    check_embedding_net_device(embedding_net=embedding_net, datum=batch_y)
    y_numel = embedding_net(batch_y[:1]).numel()

    if x_numel == 1:
        warn(
            "In one-dimensional output space, this flow is limited to Gaussians"
        )

    transform_list = []
    for _ in range(num_transforms):
        block = [
            transforms.MaskedAffineAutoregressiveTransform(
                features=x_numel,
                hidden_features=hidden_features,
                context_features=y_numel,
                num_blocks=num_blocks,
                use_residual_blocks=False,
                random_mask=False,
                activation=tanh,
                dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            ),
            transforms.RandomPermutation(features=x_numel),
        ]
        transform_list += block

    z_score_x_bool, structured_x = z_score_parser(z_score_x)
    if z_score_x_bool:
        transform_list = [
            standardizing_transform(batch_x, structured_x)
        ] + transform_list

    z_score_y_bool, structured_y = z_score_parser(z_score_y)
    if z_score_y_bool:
        embedding_net = nn.Sequential(standardizing_net(batch_y, structured_y),
                                      embedding_net)

    # Combine transforms.
    transform = transforms.CompositeTransform(transform_list)

    distribution = distributions_.StandardNormal((x_numel, ))
    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
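
A hedged sketch of the string-valued z-score options, not from the snippet: for time-series-like x, "structured" computes a single mean and std over the whole batch instead of per dimension.

import torch

# Hypothetical data: 100 time series of length 50, conditioned on 2 parameters.
net = build_maf(batch_x=torch.randn(100, 50), batch_y=torch.randn(100, 2),
                z_score_x="structured", z_score_y="independent")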
Example no. 6
File: flow.py Project: bkmi/sbi
def build_made(
        batch_x: Tensor,
        batch_y: Tensor,
        z_score_x: Optional[str] = "independent",
        z_score_y: Optional[str] = "independent",
        hidden_features: int = 50,
        num_mixture_components: int = 10,
        embedding_net: nn.Module = nn.Identity(),
        **kwargs,
) -> nn.Module:
    """Builds MADE p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network, can be one of:
            - `none`, or None: do not z-score.
            - `independent`: z-score each dimension independently.
            - `structured`: treat dimensions as related, therefore compute mean and std
            over the entire batch, instead of per-dimension. Should be used when each
            sample is, for example, a time series or an image.
        z_score_y: Whether to z-score ys passing into the network, same options as
            z_score_x.
        hidden_features: Number of hidden features.
        num_mixture_components: Number of mixture components.
        embedding_net: Optional embedding network for y.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for mades and are therefore ignored.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    # Infer the output dimensionality of the embedding_net by making a forward pass.
    check_data_device(batch_x, batch_y)
    check_embedding_net_device(embedding_net=embedding_net, datum=batch_y)
    y_numel = embedding_net(batch_y[:1]).numel()

    if x_numel == 1:
        warn(
            "In one-dimensional output space, this flow is limited to Gaussians"
        )

    transform = transforms.IdentityTransform()

    z_score_x_bool, structured_x = z_score_parser(z_score_x)
    if z_score_x_bool:
        transform_zx = standardizing_transform(batch_x, structured_x)
        transform = transforms.CompositeTransform([transform_zx, transform])

    z_score_y_bool, structured_y = z_score_parser(z_score_y)
    if z_score_y_bool:
        embedding_net = nn.Sequential(standardizing_net(batch_y, structured_y),
                                      embedding_net)

    distribution = distributions_.MADEMoG(
        features=x_numel,
        hidden_features=hidden_features,
        context_features=y_numel,
        num_blocks=5,
        num_mixture_components=num_mixture_components,
        use_residual_blocks=True,
        random_mask=False,
        activation=relu,
        dropout_probability=0.0,
        use_batch_norm=False,
        custom_initialization=True,
    )

    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
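
A brief sketch, assuming random placeholder data: passing None (or "none") skips standardization entirely, so the x-transform stays the identity and no standardizing net is prepended to the y-embedding.

import torch

net = build_made(batch_x=torch.randn(100, 3), batch_y=torch.randn(100, 2),
                 z_score_x=None, z_score_y=None)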
Example no. 7
File: flow.py Project: bkmi/sbi
def build_nsf(
    batch_x: Tensor,
    batch_y: Tensor,
    z_score_x: Optional[str] = "independent",
    z_score_y: Optional[str] = "independent",
    hidden_features: int = 50,
    num_transforms: int = 5,
    num_bins: int = 10,
    embedding_net: nn.Module = nn.Identity(),
    tail_bound: float = 3.0,
    hidden_layers_spline_context: int = 1,
    num_blocks: int = 2,
    dropout_probability: float = 0.0,
    use_batch_norm: bool = False,
    **kwargs,
) -> nn.Module:
    """Builds NSF p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network, can be one of:
            - `none`, or None: do not z-score.
            - `independent`: z-score each dimension independently.
            - `structured`: treat dimensions as related, therefore compute mean and std
            over the entire batch, instead of per-dimension. Should be used when each
            sample is, for example, a time series or an image.
        z_score_y: Whether to z-score ys passing into the network, same options as
            z_score_x.
        hidden_features: Number of hidden features.
        num_transforms: Number of transforms.
        num_bins: Number of bins used for the splines.
        embedding_net: Optional embedding network for y.
        tail_bound: tail bound for each spline.
        hidden_layers_spline_context: number of hidden layers of the spline context net
            for one-dimensional x.
        num_blocks: number of blocks used for residual net for context embedding.
        dropout_probability: dropout probability for regularization in residual net.
        use_batch_norm: whether to use batch norm in residual net.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for nsf and are therefore ignored.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    # Infer the output dimensionality of the embedding_net by making a forward pass.
    check_data_device(batch_x, batch_y)
    check_embedding_net_device(embedding_net=embedding_net, datum=batch_y)
    y_numel = embedding_net(batch_y[:1]).numel()

    # Define mask function to alternate between predicted x-dimensions.
    def mask_in_layer(i):
        return create_alternating_binary_mask(features=x_numel,
                                              even=(i % 2 == 0))

    # If x is just a scalar then use a dummy mask and learn spline parameters using the
    # conditioning variables only.
    if x_numel == 1:

        # Conditioner ignores the data and uses the conditioning variables only.
        conditioner = partial(
            ContextSplineMap,
            hidden_features=hidden_features,
            context_features=y_numel,
            hidden_layers=hidden_layers_spline_context,
        )
    else:
        # Use conditional resnet as spline conditioner.
        conditioner = partial(
            nets.ResidualNet,
            hidden_features=hidden_features,
            context_features=y_numel,
            num_blocks=num_blocks,
            activation=relu,
            dropout_probability=dropout_probability,
            use_batch_norm=use_batch_norm,
        )

    # Stack spline transforms.
    transform_list = []
    for i in range(num_transforms):
        block = [
            transforms.PiecewiseRationalQuadraticCouplingTransform(
                mask=(mask_in_layer(i) if x_numel > 1
                      else tensor([1], dtype=uint8)),
                transform_net_create_fn=conditioner,
                num_bins=num_bins,
                tails="linear",
                tail_bound=tail_bound,
                apply_unconditional_transform=False,
            )
        ]
        # Add the LU linear transform only for multi-dimensional x; a
        # permutation only makes sense for more than one feature.
        if x_numel > 1:
            block.append(transforms.LULinear(x_numel, identity_init=True))
        transform_list += block

    z_score_x_bool, structured_x = z_score_parser(z_score_x)
    if z_score_x_bool:
        # Prepend standardizing transform to nsf transforms.
        transform_list = [
            standardizing_transform(batch_x, structured_x)
        ] + transform_list

    z_score_y_bool, structured_y = z_score_parser(z_score_y)
    if z_score_y_bool:
        # Prepend standardizing transform to y-embedding.
        embedding_net = nn.Sequential(standardizing_net(batch_y, structured_y),
                                      embedding_net)

    distribution = distributions_.StandardNormal((x_numel, ))

    # Combine transforms.
    transform = transforms.CompositeTransform(transform_list)
    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
Example no. 8
def neural_net_nsf(
    self,
    hidden_features,
    num_blocks,
    num_bins,
    xDim,
    thetaDim,
    batch_x=None,
    batch_theta=None,
    tail=3.,
    bounded=False,
    embedding_net=torch.nn.Identity()) -> torch.nn.Module:
    """Builds NSF p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network.
        z_score_y: Whether to z-score ys passing into the network.
        hidden_features: Number of hidden features.
        num_transforms: Number of transforms.
        embedding_net: Optional embedding network for y.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for maf and are therefore ignored.

    Returns:
        Neural network.
    """

    basic_transform = [
        transforms.CompositeTransform([
            transforms.PiecewiseRationalQuadraticCouplingTransform(
                mask=create_alternating_binary_mask(features=xDim,
                                                    even=(i % 2 == 0)).to(
                                                        self.args.device),
                transform_net_create_fn=lambda in_features, out_features:
                nets.ResidualNet(
                    in_features=in_features,
                    out_features=out_features,
                    hidden_features=hidden_features,
                    context_features=thetaDim,
                    num_blocks=2,
                    activation=torch.relu,
                    dropout_probability=0.,
                    use_batch_norm=False,
                ),
                num_bins=num_bins,
                tails='linear',
                tail_bound=tail,
                apply_unconditional_transform=False,
            ),
            transforms.RandomPermutation(features=xDim,
                                         device=self.args.device),
            transforms.LULinear(xDim, identity_init=True),
        ]) for i in range(num_blocks)
    ]

    transform = transforms.CompositeTransform(basic_transform).to(
        self.args.device)

    if batch_theta is not None:
        if bounded:
            transform_bounded = transforms.Logit(self.args.device)
            if self.sim.min[0].item() != 0 or self.sim.max[0].item() != 1:
                transform_affine = transforms.PointwiseAffineTransform(
                    shift=-self.sim.min / (self.sim.max - self.sim.min),
                    scale=1. / (self.sim.max - self.sim.min))
                transform = transforms.CompositeTransform(
                    [transform_affine, transform_bounded, transform])
            else:
                transform = transforms.CompositeTransform(
                    [transform_bounded, transform])
        else:
            transform_zx = standardizing_transform(batch_x)
            transform = transforms.CompositeTransform(
                [transform_zx, transform])
        embedding_net = torch.nn.Sequential(standardizing_net(batch_theta),
                                            embedding_net)
        distribution = distributions_.StandardNormal((xDim, ),
                                                     self.args.device)
        neural_net = flows.Flow(self,
                                transform,
                                distribution,
                                embedding_net=embedding_net).to(
                                    self.args.device)
    else:
        distribution = distributions_.StandardNormal((xDim, ),
                                                     self.args.device)
        neural_net = flows.Flow(self, transform,
                                distribution).to(self.args.device)

    return neural_net
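
The bounded branch above maps x from [min, max] to (0, 1) with an affine transform and then onto the real line with a logit. A standalone sketch of that composition under a stock nflows install (the snippet passes device arguments, which suggests a patched fork):

import torch
from nflows import transforms

low, high = torch.tensor([0.0]), torch.tensor([2.0])
# Affine map sends [low, high] to [0, 1]; Logit maps (0, 1) onto the reals.
to_unit = transforms.PointwiseAffineTransform(
    shift=-low / (high - low), scale=1.0 / (high - low))
bounded_to_real = transforms.CompositeTransform([to_unit, transforms.Logit()])
z, logabsdet = bounded_to_real(torch.tensor([[0.5]]))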
Example no. 9
def build_nsf(
        batch_x: Tensor = None,
        batch_y: Tensor = None,
        z_score_x: bool = True,
        z_score_y: bool = True,
        hidden_features: int = 50,
        num_transforms: int = 5,
        num_bins: int = 10,
        embedding_net: nn.Module = nn.Identity(),
        **kwargs,
) -> nn.Module:
    """Builds NSF p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network.
        z_score_y: Whether to z-score ys passing into the network.
        hidden_features: Number of hidden features.
        num_transforms: Number of transforms.
        num_bins: Number of bins used for the splines.
        embedding_net: Optional embedding network for y.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for nsf and are therefore ignored.

    Returns:
        Neural network.
    """
    x_numel = batch_x[0].numel()
    # Infer the output dimensionality of the embedding_net by making a forward pass.
    y_numel = embedding_net(batch_y[:1]).numel()

    if x_numel == 1:

        class ContextSplineMap(nn.Module):
            """
            Neural network from `context` to the spline parameters.

            We cannot use the resnet as a conditioner to learn each dimension
            conditioned on the others (because there is only one dimension).
            Instead, we learn the spline parameters directly. In the case of
            conditional density estimation, we make the spline parameters
            conditional on the context. This is implemented in this class.
            """
            def __init__(
                self,
                in_features: int,
                out_features: int,
                hidden_features: int,
                context_features: int,
            ):
                """
                Initialize neural network that learns to predict spline parameters.

                Args:
                    in_features: Unused since there is no `conditioner` in 1D.
                    out_features: Number of spline parameters.
                    hidden_features: Number of hidden units.
                    context_features: Number of context features.
                """
                super().__init__()
                # `self.hidden_features` is only defined such that nflows can infer
                # a scaling factor for initializations.
                self.hidden_features = hidden_features

                # Use a non-linearity because otherwise, there will be a linear
                # mapping from context features onto distribution parameters.
                self.spline_predictor = nn.Sequential(
                    nn.Linear(context_features, self.hidden_features),
                    nn.ReLU(),
                    nn.Linear(self.hidden_features, self.hidden_features),
                    nn.ReLU(),
                    nn.Linear(self.hidden_features, out_features),
                )

            def __call__(self, inputs: Tensor, context: Tensor, *args,
                         **kwargs) -> Tensor:
                """
                Return parameters of the spline given the context.

                Args:
                    inputs: Unused. It would usually be the other dimensions, but in
                        1D, there are no other dimensions.
                    context: Context features.

                Returns:
                    Spline parameters.
                """
                return self.spline_predictor(context)

        mask_in_layer = lambda i: tensor([1], dtype=uint8)
        conditioner = lambda in_features, out_features: ContextSplineMap(
            in_features,
            out_features,
            hidden_features,
            context_features=y_numel)
        if num_transforms > 1:
            warn(
                f"You are using `num_transforms={num_transforms}`. When estimating a "
                "1D density, you will not get any performance increase by using "
                "multiple transforms with NSF. We recommend setting "
                "`num_transforms=1` for faster training (see also 'Change "
                "hyperparameters of density estimators' here: "
                "https://www.mackelab.org/sbi/tutorial/04_density_estimators/)."
            )

    else:
        mask_in_layer = lambda i: create_alternating_binary_mask(
            features=x_numel, even=(i % 2 == 0))
        conditioner = lambda in_features, out_features: nets.ResidualNet(
            in_features=in_features,
            out_features=out_features,
            hidden_features=hidden_features,
            context_features=y_numel,
            num_blocks=2,
            activation=relu,
            dropout_probability=0.0,
            use_batch_norm=False,
        )

    transform = transforms.CompositeTransform([
        transforms.CompositeTransform([
            transforms.PiecewiseRationalQuadraticCouplingTransform(
                mask=mask_in_layer(i),
                transform_net_create_fn=conditioner,
                num_bins=num_bins,
                tails="linear",
                tail_bound=3.0,
                apply_unconditional_transform=False,
            ),
            transforms.LULinear(x_numel, identity_init=True),
        ]) for i in range(num_transforms)
    ])

    if z_score_x:
        transform_zx = standardizing_transform(batch_x)
        transform = transforms.CompositeTransform([transform_zx, transform])

    if z_score_y:
        embedding_net = nn.Sequential(standardizing_net(batch_y),
                                      embedding_net)

    distribution = distributions_.StandardNormal((x_numel, ))
    neural_net = flows.Flow(transform, distribution, embedding_net)

    return neural_net
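
Per the warning above, one transform suffices for scalar x; a minimal sketch (not part of the original snippet):

import torch

# A single spline transform: more give no benefit when x is one-dimensional.
net = build_nsf(batch_x=torch.randn(100, 1), batch_y=torch.randn(100, 2),
                num_transforms=1)
samples = net.sample(10, context=torch.randn(1, 2))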