Example #1
File: flows.py  Project: tanghyd/gravflows
def create_NDE_model(input_dim, context_dim, num_flow_steps,
                     base_transform_kwargs):
    """Build NSF (neural spline flow) model. This uses the nsf module
    available at https://github.com/bayesiains/nsf.

    This models the posterior distribution p(x|y).

    The model consists of
        * a base distribution (StandardNormal, dim(x))
        * a sequence of transforms, each conditioned on y

    Arguments:
        input_dim {int} -- dimensionality of x
        context_dim {int} -- dimensionality of y
        num_flow_steps {int} -- number of sequential transforms
        base_transform_kwargs {dict} -- hyperparameters for transform steps

    Returns:
        Flow -- the model
    """
    distribution = distributions.StandardNormal((input_dim, ))
    transform = create_transform(num_flow_steps, input_dim, context_dim,
                                 base_transform_kwargs)
    flow = flows.Flow(transform, distribution)

    # Store hyperparameters - useful for loading from file.
    flow.model_hyperparams = {
        'input_dim': input_dim,
        'num_flow_steps': num_flow_steps,
        'context_dim': context_dim,
        'base_transform_kwargs': base_transform_kwargs
    }

    return flow
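A minimal usage sketch (not part of the original project; torch is assumed to be imported, and the contents of base_transform_kwargs are project-specific, so the empty dict below is only a placeholder):

# Illustrative only: query the conditional flow built above.
flow = create_NDE_model(input_dim=15, context_dim=128, num_flow_steps=5,
                        base_transform_kwargs={})
y = torch.randn(64, 128)                                   # batch of 64 contexts
log_prob = flow.log_prob(torch.randn(64, 15), context=y)   # shape [64]
samples = flow.sample(10, context=y)                       # shape [64, 10, 15]
Example #2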
    def _create_approximate_posterior(self):
        posterior_transform = self._create_transform(
            self.maf_steps_posterior, self.posterior_context_size)
        distribution = StandardNormal((self.dimensions, ))

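        # InverseTransform swaps the transform's forward and inverse passes,
        # presumably so that sampling from the posterior uses the fast
        # direction of the autoregressive transform (the IAF trick).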
        return flows.Flow(transforms.InverseTransform(posterior_transform),
                          distribution)
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--run_dir', type=str, required=True)
    parser.add_argument('--data_dir', type=str, required=True)

    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--geometric_prob', type=float, default=0.001)
    parser.add_argument('--learning_rate', type=float, default=0.0005)
    parser.add_argument('--num_evals', type=int, default=200)
    parser.add_argument('--num_iters', type=int, default=100000)
    parser.add_argument('--reconstr_coef', type=float, default=0.0)
    # argparse's type=bool treats any non-empty string as True; parse explicitly.
    parser.add_argument('--use_gpu',
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        default=True)
    parser.add_argument('--seed', type=int, default=314833845)

    script_dir = Path(__file__).resolve().parent
    parser.add_argument('--data_config', type=str,
                        default=script_dir/'config'/'data_config.json')
    parser.add_argument('--flow_config', type=str,
                        default=script_dir/'config'/'flow_config.json')

    # For restarting runs.
    parser.add_argument('--flow_ckpt', type=str)
    parser.add_argument('--optimizer_ckpt', type=str)
    parser.add_argument('--first_iter', type=int, default=0)

    args = parser.parse_args()

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    print('Loading data')
    with open(args.data_config) as fp:
        data_config = json.load(fp)
    train_dataset = create_dataset(root=args.data_dir,
                                   split='train',
                                   **data_config)
    c, h, w = train_dataset.img_shape

    print('Creating a flow')
    with open(args.flow_config) as fp:
        flow_config = json.load(fp)
    distribution = distributions.StandardNormal((c * h * w,))
    transform = create_transform(c, h, w,
                                 num_bits=data_config['num_bits'],
                                 **flow_config)
    flow = flows.Flow(transform, distribution)

    print('Training')
    train(flow=flow,
          train_dataset=train_dataset,
          run_dir=args.run_dir,
          num_iters=args.num_iters,
          batch_size=args.batch_size,
          reconstr_coef=args.reconstr_coef,
          geometric_prob=args.geometric_prob,
          use_gpu=args.use_gpu,
          num_evals=args.num_evals,
          lr=args.learning_rate,
          flow_ckpt=args.flow_ckpt,
          optimizer_ckpt=args.optimizer_ckpt,
          first_iter=args.first_iter)
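The excerpt ends without an entry point; presumably both this script and the one in Example #4 close with the standard guard:

if __name__ == '__main__':
    main()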
Example #4
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--run_dir', type=str, required=True)
    parser.add_argument('--output_dir', type=str, required=True)
    parser.add_argument('--data_dir', type=str, required=True)

    # As in Example #3: avoid the argparse type=bool pitfall.
    parser.add_argument('--use_gpu',
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        default=True)
    parser.add_argument('--seed', type=int, default=314833845)

    script_dir = Path(__file__).resolve().parent
    parser.add_argument('--data_config', type=str,
                        default=script_dir / 'config' / 'data_config.json')
    parser.add_argument('--flow_config', type=str,
                        default=script_dir / 'config' / 'flow_config.json')

    args = parser.parse_args()

    run_dir = Path(args.run_dir)

    print('Loading data')

    with open(args.data_config) as fp:
        data_config = json.load(fp)

    # Load validation data
    valid_indices = torch.load(run_dir / 'valid_indices.pt')
    eval_dataset = create_dataset(root=args.data_dir,
                                  split='valid',
                                  valid_indices=valid_indices,
                                  **data_config)

    print('Creating a flow')

    with open(args.flow_config) as fp:
        flow_config = json.load(fp)

    c, h, w = eval_dataset.img_shape
    distribution = distributions.StandardNormal((c * h * w,))
    transform = create_transform(c, h, w,
                                 num_bits=data_config['num_bits'],
                                 **flow_config)
    flow = flows.Flow(transform, distribution)

    # Load checkpoint
    flow_ckpt = run_dir / 'latest_flow.pt'
    flow.load_state_dict(torch.load(flow_ckpt))
    print(f'Flow checkpoint loaded: {flow_ckpt}')

    output_dir = Path(args.output_dir)
    output_dir.mkdir(exist_ok=True)

    print('Sampling')
    sample(flow=flow,
           eval_dataset=eval_dataset,
           output_dir=output_dir,
           use_gpu=args.use_gpu,
           seed=args.seed)
Example #5
    def _create_approximate_posterior(self):

        distribution = StandardNormal((self.dimensions, ))

        posterior_transform = self._create_transform(
            self.context_size, hidden_features=self.hidden_features)

        return flows.Flow(transforms.InverseTransform(posterior_transform),
                          distribution)
Example #6
def create_flow(flow_type):
    distribution = distributions.StandardNormal((3,))

    if flow_type == 'lu_flow':
        transform = transforms.CompositeTransform([
            transforms.RandomPermutation(3),
            transforms.LULinear(3, identity_init=False)
        ])
    elif flow_type == 'qr_flow':
        transform = transforms.QRLinear(3, num_householder=3)
    else:
        raise RuntimeError(f'Unknown type: {flow_type}')

    return flows.Flow(transform, distribution)
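A quick check of the factory above (illustrative, not from the source): both flow types are exact-likelihood models over 3-dimensional data.

flow = create_flow('lu_flow')
x = torch.randn(16, 3)
print(flow.log_prob(x).shape)  # torch.Size([16])
print(flow.sample(4).shape)    # torch.Size([4, 3])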
Example #7
    def __init__(self, dimensions, flow_steps=5, lr=1e-3, epochs=100,
                 batch_size=256, device=None):

        self.dimensions = dimensions
        self.flow_steps = flow_steps
        self.batch_size = batch_size
        self.epochs = epochs
        self.device = device
        self.lr = lr

        transform = self._create_transform()

        self.flow = flows.Flow(
            transform,
            distributions.StandardNormal((self.dimensions,))
        )

        self.flow.to(self.device)
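Example #8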
    def _create_approximate_posterior(self):

        # context_encoder = torch.nn.Linear(
        #     self.context_size,
        #     2 * self.dimensions
        # )

        # distribution = ConditionalDiagonalNormal(
        #     shape=(self.dimensions,),
        #     context_encoder=context_encoder
        # )

        distribution = StandardNormal((self.dimensions, ))

        posterior_transform = self._create_transform(self.context_size)

        return flows.Flow(transforms.InverseTransform(posterior_transform),
                          distribution)
Example #9
def make_scalar_flow(
    dim,
    flow_steps=5,
    transform_type="rq",
    linear_transform="none",
    bins=10,
    tail_bound=10.0,
    hidden_features=64,
    num_transform_blocks=3,
    use_batch_norm=False,
    dropout_prob=0.0,
):
    logger.info(
        f"Creating flow for {dim}-dimensional unstructured data, using {flow_steps} blocks of {transform_type} transforms, "
        f"each with {num_transform_blocks} transform blocks and {hidden_features} hidden units, interlaced with {linear_transform} "
        f"linear transforms"
    )

    base_dist = distributions.StandardNormal((dim,))

    transform = []
    for i in range(flow_steps):
        if linear_transform != "none":
            transform.append(_make_scalar_linear_transform(linear_transform, dim))
        transform.append(
            _make_scalar_base_transform(
                i,
                dim,
                transform_type,
                bins,
                tail_bound,
                hidden_features,
                num_transform_blocks,
                use_batch_norm,
                dropout_prob=dropout_prob,
            )
        )
    if linear_transform != "none":
        transform.append(_make_scalar_linear_transform(linear_transform, dim))

    transform = transforms.CompositeTransform(transform)
    flow = flows.Flow(transform, base_dist)

    return flow
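The helper _make_scalar_linear_transform is not shown in this excerpt. A plausible sketch of what it does, modeled on the linear-transform factory in the nsf reference code (the option names here are assumptions, not taken from the source):

def _make_scalar_linear_transform(linear_transform, dim):
    # Hypothetical reconstruction of the missing helper.
    if linear_transform == "permutation":
        return transforms.RandomPermutation(features=dim)
    elif linear_transform == "lu":
        return transforms.CompositeTransform([
            transforms.RandomPermutation(features=dim),
            transforms.LULinear(dim, identity_init=True),
        ])
    elif linear_transform == "svd":
        return transforms.CompositeTransform([
            transforms.RandomPermutation(features=dim),
            transforms.SVDLinear(dim, num_householder=10),
        ])
    else:
        raise RuntimeError(f"Unknown linear transform {linear_transform}")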
Example #10
def make_flow(latent_dim, n_layers):
    transform_list = [BatchNormTransform(latent_dim)]
    for _ in range(n_layers):
        transform_list.extend(
            [
                transforms.MaskedAffineAutoregressiveTransform(
                    features=latent_dim,
                    hidden_features=64,
                ),
                transforms.RandomPermutation(latent_dim),
            ]
        )

    transform = transforms.CompositeTransform(transform_list)

    # Define a base distribution.
    base_distribution = distributions.StandardNormal(shape=[latent_dim])

    # Combine into a flow.
    return flows.Flow(transform=transform, distribution=base_distribution)
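None of the snippets on this page show training, so here is a minimal maximum-likelihood loop for the flow above (assumptions: BatchNormTransform is defined as in the project, and data is a [N, latent_dim] tensor; placeholder data is used below):

flow = make_flow(latent_dim=8, n_layers=4)
optimizer = torch.optim.Adam(flow.parameters(), lr=1e-3)
data = torch.randn(1024, 8)  # placeholder data
for step in range(100):
    optimizer.zero_grad()
    loss = -flow.log_prob(data).mean()  # negative log-likelihood
    loss.backward()
    optimizer.step()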
Example #11
    def _create_likelihood(self):
        self.transform = self._create_transform(self.maf_steps_prior)
        distribution = StandardNormal((self.dimensions, ))

        return flows.Flow(self.transform, distribution)
Example #12
    def _create_prior(self):
        self.transform = self._create_transform()
        distribution = StandardNormal((self.dimensions, ))
        return flows.Flow(self.transform, distribution)
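Example #13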
def neural_net_nsf(
    self,
    hidden_features,
    num_blocks,
    num_bins,
    xDim,
    thetaDim,
    batch_x=None,
    batch_theta=None,
    tail=3.,
    bounded=False,
    embedding_net=torch.nn.Identity()) -> torch.nn.Module:
    """Builds NSF p(x|y).

    Args:
        batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
        z_score_x: Whether to z-score xs passing into the network.
        z_score_y: Whether to z-score ys passing into the network.
        hidden_features: Number of hidden features.
        num_transforms: Number of transforms.
        embedding_net: Optional embedding network for y.
        kwargs: Additional arguments that are passed by the build function but are not
            relevant for maf and are therefore ignored.

    Returns:
        Neural network.
    """

    basic_transform = [
        transforms.CompositeTransform([
            transforms.PiecewiseRationalQuadraticCouplingTransform(
                mask=create_alternating_binary_mask(features=xDim,
                                                    even=(i % 2 == 0)).to(
                                                        self.args.device),
                transform_net_create_fn=lambda in_features, out_features:
                nets.ResidualNet(
                    in_features=in_features,
                    out_features=out_features,
                    hidden_features=hidden_features,
                    context_features=thetaDim,
                    num_blocks=2,
                    activation=torch.relu,
                    dropout_probability=0.,
                    use_batch_norm=False,
                ),
                num_bins=num_bins,
                tails='linear',
                tail_bound=tail,
                apply_unconditional_transform=False,
            ),
            transforms.RandomPermutation(features=xDim,
                                         device=self.args.device),
            transforms.LULinear(xDim, identity_init=True),
        ]) for i in range(num_blocks)
    ]

    transform = transforms.CompositeTransform(basic_transform).to(
        self.args.device)

    if batch_theta is not None:
        if bounded:
            transform_bounded = transforms.Logit(self.args.device)
            if self.sim.min[0].item() != 0 or self.sim.max[0].item() != 1:
                transform_affine = transforms.PointwiseAffineTransform(
                    shift=-self.sim.min / (self.sim.max - self.sim.min),
                    scale=1. / (self.sim.max - self.sim.min))
                transform = transforms.CompositeTransform(
                    [transform_affine, transform_bounded, transform])
            else:
                transform = transforms.CompositeTransform(
                    [transform_bounded, transform])
        else:
            transform_zx = standardizing_transform(batch_x)
            transform = transforms.CompositeTransform(
                [transform_zx, transform])
        embedding_net = torch.nn.Sequential(standardizing_net(batch_theta),
                                            embedding_net)
        distribution = distributions_.StandardNormal((xDim, ),
                                                     self.args.device)
        neural_net = flows.Flow(self,
                                transform,
                                distribution,
                                embedding_net=embedding_net).to(
                                    self.args.device)
    else:
        distribution = distributions_.StandardNormal((xDim, ),
                                                     self.args.device)
        neural_net = flows.Flow(self, transform,
                                distribution).to(self.args.device)

    return neural_net
Example #14
    def _create_prior(self):
        self.transform = self._create_transform(
            context_features=None, hidden_features=self.hidden_features)
        distribution = StandardNormal((self.dimensions, ))
        return flows.Flow(self.transform, distribution)
Example #15
def make_image_flow(
    chw,
    levels=7,
    steps_per_level=3,
    transform_type="rq",
    bins=4,
    tail_bound=3.0,
    hidden_channels=96,
    act_norm=True,
    batch_norm=False,
    dropout_prob=0.0,
    alpha=0.05,
    num_bits=8,
    preprocessing="glow",
    residual_blocks=3,
):
    c, h, w = chw
    if not isinstance(hidden_channels, list):
        hidden_channels = [hidden_channels] * levels

    # Base density
    base_dist = distributions.StandardNormal((c * h * w,))
    logger.debug(f"Base density: standard normal in {c * h * w} dimensions")

    # Preprocessing: Inputs to the model in [0, 2 ** num_bits]
    if preprocessing == "glow":
        # Map to [-0.5,0.5]
        preprocess_transform = transforms.AffineScalarTransform(scale=(1.0 / 2 ** num_bits), shift=-0.5)
    elif preprocessing == "realnvp":
        preprocess_transform = transforms.CompositeTransform(
            [
                # Map to [0,1]
                transforms.AffineScalarTransform(scale=(1.0 / 2 ** num_bits)),
                # Map into unconstrained space as done in RealNVP
                transforms.AffineScalarTransform(shift=alpha, scale=(1 - alpha)),
                transforms.Logit(),
            ]
        )
    elif preprocessing == "realnvp_2alpha":
        preprocess_transform = transforms.CompositeTransform(
            [
                transforms.AffineScalarTransform(scale=(1.0 / 2 ** num_bits)),
                transforms.AffineScalarTransform(shift=alpha, scale=(1 - 2.0 * alpha)),
                transforms.Logit(),
            ]
        )
    else:
        raise RuntimeError("Unknown preprocessing type: {}".format(preprocessing))

    logger.debug(f"{preprocessing} preprocessing")

    # Multi-scale transform
    logger.debug("Input: c, h, w = %s, %s, %s", c, h, w)
    mct = transforms.MultiscaleCompositeTransform(num_transforms=levels)
    for level, level_hidden_channels in zip(range(levels), hidden_channels):
        logger.debug("Level %s", level)
        squeeze_transform = transforms.SqueezeTransform()
        c, h, w = squeeze_transform.get_output_shape(c, h, w)
        logger.debug("  c, h, w = %s, %s, %s", c, h, w)
        transform_level = [squeeze_transform]
        logger.debug("  SqueezeTransform()")

        for _ in range(steps_per_level):
            transform_level.append(
                _make_image_base_transform(
                    c,
                    level_hidden_channels,
                    act_norm,
                    transform_type,
                    residual_blocks,
                    batch_norm,
                    dropout_prob,
                    tail_bound,
                    bins,
                )
            )

        transform_level.append(transforms.OneByOneConvolution(c))  # End each level with a linear transformation
        logger.debug("  OneByOneConvolution(%s)", c)
        transform_level = transforms.CompositeTransform(transform_level)

        new_shape = mct.add_transform(transform_level, (c, h, w))
        if new_shape:  # If not last layer
            c, h, w = new_shape
            logger.debug("  new_shape = %s, %s, %s", c, h, w)

    # Full transform and flow
    transform = transforms.CompositeTransform([preprocess_transform, mct])
    flow = flows.Flow(transform, base_dist)

    return flow
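For intuition about the shape bookkeeping above (an illustrative calculation, not from the source): SqueezeTransform maps (c, h, w) to (4c, h/2, w/2), and after every level except the last, MultiscaleCompositeTransform splits half of the channels off into the latent vector. For a 3x64x64 input:

c, h, w = 3, 64, 64
for level in range(3):
    c, h, w = 4 * c, h // 2, w // 2  # after SqueezeTransform
    print(f"level {level}: flow steps operate on {(c, h, w)}")
    c //= 2  # half the channels are split off (all levels but the last)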
Example #16
def get_flow(
    model: str,
    dim_distribution: int,
    dim_context: Optional[int] = None,
    embedding: Optional[torch.nn.Module] = None,
    hidden_features: int = 50,
    made_num_mixture_components: int = 10,
    made_num_blocks: int = 4,
    flow_num_transforms: int = 5,
    mean=0.0,
    std=1.0,
) -> torch.nn.Module:
    """Density estimator

    Args:
        model: Model, one of made / maf / nsf / nsf_bounded
        dim_distribution: Dim of distribution
        dim_context: Dim of context
        embedding: Embedding network
        hidden_features: For all, number of hidden features
        made_num_mixture_components: For MADEs only, number of mixture components
        made_num_blocks: For MADEs only, number of blocks
        flow_num_transforms: For flows only, number of transforms
        mean: For normalization
        std: For normalization

    Returns:
        Neural network
    """
    standardizing_transform = transforms.AffineTransform(shift=-mean / std,
                                                         scale=1 / std)

    features = dim_distribution
    context_features = dim_context

    if model == "made":
        transform = standardizing_transform
        distribution = distributions_.MADEMoG(
            features=features,
            hidden_features=hidden_features,
            context_features=context_features,
            num_blocks=made_num_blocks,
            num_mixture_components=made_num_mixture_components,
            use_residual_blocks=True,
            random_mask=False,
            activation=torch.relu,
            dropout_probability=0.0,
            use_batch_norm=False,
            custom_initialization=True,
        )
        neural_net = flows.Flow(transform, distribution, embedding)

    elif model == "maf":
        transform = transforms.CompositeTransform([
            transforms.CompositeTransform([
                transforms.MaskedAffineAutoregressiveTransform(
                    features=features,
                    hidden_features=hidden_features,
                    context_features=context_features,
                    num_blocks=2,
                    use_residual_blocks=False,
                    random_mask=False,
                    activation=torch.tanh,
                    dropout_probability=0.0,
                    use_batch_norm=True,
                ),
                transforms.RandomPermutation(features=features),
            ]) for _ in range(flow_num_transforms)
        ])

        transform = transforms.CompositeTransform(
            [standardizing_transform, transform])

        distribution = distributions_.StandardNormal((features, ))
        neural_net = flows.Flow(transform, distribution, embedding)

    elif model == "nsf":
        transform = transforms.CompositeTransform([
            transforms.CompositeTransform([
                transforms.PiecewiseRationalQuadraticCouplingTransform(
                    mask=create_alternating_binary_mask(features=features,
                                                        even=(i % 2 == 0)),
                    transform_net_create_fn=lambda in_features, out_features:
                    nets.ResidualNet(
                        in_features=in_features,
                        out_features=out_features,
                        hidden_features=hidden_features,
                        context_features=context_features,
                        num_blocks=2,
                        activation=torch.relu,
                        dropout_probability=0.0,
                        use_batch_norm=False,
                    ),
                    num_bins=10,
                    tails="linear",
                    tail_bound=3.0,
                    apply_unconditional_transform=False,
                ),
                transforms.LULinear(features, identity_init=True),
            ]) for i in range(flow_num_transforms)
        ])

        transform = transforms.CompositeTransform(
            [standardizing_transform, transform])

        distribution = distributions_.StandardNormal((features, ))
        neural_net = flows.Flow(transform, distribution, embedding)

    elif model == "nsf_bounded":

        transform = transforms.CompositeTransform([
            transforms.CompositeTransform([
                transforms.PiecewiseRationalQuadraticCouplingTransform(
                    mask=create_alternating_binary_mask(
                        features=dim_distribution, even=(i % 2 == 0)),
                    transform_net_create_fn=lambda in_features, out_features:
                    nets.ResidualNet(
                        in_features=in_features,
                        out_features=out_features,
                        hidden_features=hidden_features,
                        context_features=context_features,
                        num_blocks=2,
                        activation=F.relu,
                        dropout_probability=0.0,
                        use_batch_norm=False,
                    ),
                    num_bins=10,
                    tails="linear",
                    tail_bound=np.sqrt(3),  # a uniform on [-sqrt(3), sqrt(3)] has unit variance
                    apply_unconditional_transform=False,
                ),
                transforms.RandomPermutation(features=dim_distribution),
            ]) for i in range(flow_num_transforms)
        ])

        transform = transforms.CompositeTransform(
            [standardizing_transform, transform])

        distribution = StandardUniform(shape=(dim_distribution, ))
        neural_net = flows.Flow(transform, distribution, embedding)

    else:
        raise ValueError(f"Unknown model: {model}")

    return neural_net
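A hedged usage sketch for the conditional NSF branch (shapes are illustrative; torch is assumed imported):

density_estimator = get_flow(model="nsf", dim_distribution=5, dim_context=3)
theta = torch.randn(32, 5)  # variables whose density we model
x = torch.randn(32, 3)      # conditioning context
log_prob = density_estimator.log_prob(theta, context=x)  # shape [32]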