Example #1
    def __init__(self, base_distribution, transforms, validate_args=None):
        if isinstance(transforms, Transform):
            transforms = [transforms]
        elif isinstance(transforms, list):
            if not all(isinstance(t, Transform) for t in transforms):
                raise ValueError(
                    "transforms must be a Transform or a list of Transforms"
                )
        else:
            raise ValueError(
                "transforms must be a Transform or list, but was {}".format(transforms)
            )
        if isinstance(base_distribution, TransformedDistribution):
            base_dist = base_distribution.base_dist
            self.transforms = base_distribution.transforms + transforms
        else:
            base_dist = base_distribution
            self.transforms = transforms
        base_shape = base_dist.shape()
        base_event_dim = base_dist.event_dim
        transform = ComposeTransform(self.transforms)
        domain_event_dim = transform.domain.event_dim
        if len(base_shape) < domain_event_dim:
            raise ValueError(
                "Base distribution needs to have shape with size at least {}, but got {}.".format(
                    domain_event_dim, base_shape
                )
            )
        shape = transform.forward_shape(base_shape)
        expanded_base_shape = transform.inverse_shape(shape)
        if base_shape != expanded_base_shape:
            base_batch_shape = expanded_base_shape[
                : len(expanded_base_shape) - base_event_dim
            ]
            base_dist = base_dist.expand(base_batch_shape)
        reinterpreted_batch_ndims = domain_event_dim - base_event_dim
        if reinterpreted_batch_ndims > 0:
            base_dist = base_dist.to_event(reinterpreted_batch_ndims)
        self.base_dist = base_dist

        # Compute shapes.
        event_dim = transform.codomain.event_dim + max(
            base_event_dim - domain_event_dim, 0
        )
        assert len(shape) >= event_dim
        cut = len(shape) - event_dim
        batch_shape = shape[:cut]
        event_shape = shape[cut:]
        super(TransformedDistribution, self).__init__(
            batch_shape, event_shape, validate_args=validate_args
        )
Example #2
    def get_transform(self, params):
        """
        Returns the transformation learned by the guide to generate samples from the unconstrained
        (approximate) posterior.

        :param dict params: Current parameters of model and autoguide.
        :return: the transform of the posterior distribution
        :rtype: :class:`~numpyro.distributions.transforms.Transform`
        """
        return ComposeTransform([handlers.substitute(self._get_transform, params)(),
                                 UnpackTransform(self._unpack_latent)])
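
A hedged end-to-end sketch (the model, data, and optimizer settings are illustrative, and the API follows recent NumPyro releases rather than the exact version of the snippet above): fit an autoguide with SVI, then pass the learned params to get_transform.

import jax.numpy as jnp
from jax import random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import SVI, Trace_ELBO
from numpyro.infer.autoguide import AutoDiagonalNormal

def model(data):
    loc = numpyro.sample("loc", dist.Normal(0.0, 10.0))
    scale = numpyro.sample("scale", dist.LogNormal(0.0, 1.0))
    numpyro.sample("obs", dist.Normal(loc, scale), obs=data)

guide = AutoDiagonalNormal(model)
svi = SVI(model, guide, numpyro.optim.Adam(1e-2), Trace_ELBO())
data = jnp.array([0.3, -0.1, 0.8])
svi_result = svi.run(random.PRNGKey(0), 500, data)
# The returned Transform maps the guide's unconstrained latent space to the posterior.
transform = guide.get_transform(svi_result.params)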
Example #3
 def __init__(self, base_distribution, transforms, validate_args=None):
     if isinstance(transforms, Transform):
         transforms = [
             transforms,
         ]
     elif isinstance(transforms, list):
         if not all(isinstance(t, Transform) for t in transforms):
             raise ValueError(
                 "transforms must be a Transform or a list of Transforms")
     else:
         raise ValueError(
             "transforms must be a Transform or list, but was {}".format(
                 transforms))
     # XXX: this logic will not be valid when IndependentDistribution is supported;
     # in that case, it is more involved to support Transform(Indep(Transform));
     # however, we might not need to support that kind of distribution
     # and should raise an error if base_distribution is an Indep one.
     if isinstance(base_distribution, TransformedDistribution):
         self.base_dist = base_distribution.base_dist
         self.transforms = base_distribution.transforms + transforms
     else:
         self.base_dist = base_distribution
         self.transforms = transforms
     # NB: here we assume that base_dist.shape == transformed_dist.shape,
     # but that might not be true for some transforms such as StickBreakingTransform,
     # because the event dimension is transformed from (n - 1,) to (n,).
     # Currently, we have no mechanism to fix this issue. Given that this is just
     # an edge case, we skip it for now, but we need to pay attention to any
     # inference function that inspects a transformed distribution's shape.
     shape = base_distribution.batch_shape + base_distribution.event_shape
     base_ndim = len(shape)
     transform = ComposeTransform(self.transforms)
     transform_input_event_dim = transform.input_event_dim
     if base_ndim < transform_input_event_dim:
         raise ValueError(
             "Base distribution needs to have shape with size at least {}, but got {}."
             .format(transform_input_event_dim, base_ndim))
     event_dim = transform.output_event_dim + max(
         self.base_dist.event_dim - transform_input_event_dim, 0)
     # See the above note. Currently, there is no way to interpret the shape of the
     # output after transforming. To solve this, we would need something like
     # Bijector.forward_event_shape as in TFP. For now, we prepend singleton
     # dimensions as a compromise, so that event_dim and len(batch_shape) are still correct.
     if event_dim <= base_ndim:
         batch_shape = shape[:base_ndim - event_dim]
         event_shape = shape[base_ndim - event_dim:]
     else:
         event_shape = (-1, ) * event_dim
         batch_shape = ()
     super(TransformedDistribution,
           self).__init__(batch_shape,
                          event_shape,
                          validate_args=validate_args)
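
A minimal usage sketch (not from the example above; assumes standard NumPyro distributions and transforms): the constructor composes the transforms and works out batch/event shapes.

import jax.numpy as jnp
from jax import random
import numpyro.distributions as dist
from numpyro.distributions.transforms import AffineTransform, ExpTransform

base = dist.Normal(jnp.zeros(3), jnp.ones(3))
d = dist.TransformedDistribution(base, [AffineTransform(0.0, 2.0), ExpTransform()])
x = d.sample(random.PRNGKey(0))   # shape: (3,)
lp = d.log_prob(x)                # includes the Jacobian terms of both transforms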
Example #4
 def _get_transform(self):
     if self.latent_size == 1:
         raise ValueError('latent dim = 1. Consider using AutoDiagonalNormal instead')
     hidden_dims = [self.latent_size, self.latent_size] if self._hidden_dims is None else self._hidden_dims
     flows = []
     for i in range(self.num_flows):
         if i > 0:
             flows.append(PermuteTransform(np.arange(self.latent_size)[::-1]))
         arn = AutoregressiveNN(self.latent_size, hidden_dims,
                                permutation=np.arange(self.latent_size),
                                skip_connections=self._skip_connections,
                                nonlinearity=self._nonlinearity)
         arnn = numpyro.module('{}_arn__{}'.format(self.prefix, i), arn, (self.latent_size,))
         flows.append(InverseAutoregressiveTransform(arnn))
     return ComposeTransform(flows)
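
This private method is reached through the AutoIAFNormal autoguide. A hedged construction sketch (the model is illustrative; keyword names follow recent NumPyro releases):

import numpyro
import numpyro.distributions as dist
from numpyro.infer.autoguide import AutoIAFNormal

def model():
    numpyro.sample("loc", dist.Normal(0.0, 1.0))
    numpyro.sample("scale", dist.LogNormal(0.0, 1.0))

# num_flows IAF layers are stacked, with reversing permutations in between.
guide = AutoIAFNormal(model, num_flows=2, hidden_dims=[8, 8])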
Example #5
 def _get_transform(self):
     if self.latent_size == 1:
         raise ValueError(
             'latent dim = 1. Consider using AutoDiagonalNormal instead')
     flows = []
     for i in range(self.num_flows):
         if i > 0:
             flows.append(
                 PermuteTransform(np.arange(self.latent_size)[::-1]))
         residual = "gated" if i < (self.num_flows - 1) else None
         arn = BlockNeuralAutoregressiveNN(self.latent_size,
                                           self._hidden_factors, residual)
         arnn = numpyro.module('{}_arn__{}'.format(self.prefix, i), arn,
                               (self.latent_size, ))
         flows.append(BlockNeuralAutoregressiveTransform(arnn))
     return ComposeTransform(flows)
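
Similarly, this flow is built by the AutoBNAFNormal autoguide. A hedged construction sketch (illustrative model; keyword names follow recent NumPyro releases):

import numpyro
import numpyro.distributions as dist
from numpyro.infer.autoguide import AutoBNAFNormal

def model():
    numpyro.sample("loc", dist.Normal(0.0, 1.0))
    numpyro.sample("scale", dist.LogNormal(0.0, 1.0))

# hidden_factors sets the hidden-layer block sizes of each BNAF network.
guide = AutoBNAFNormal(model, num_flows=1, hidden_factors=[8, 8])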
Example #6
    def get_transform(self, params):
        """
        Returns the transformation learned by the guide to generate samples from the unconstrained
        (approximate) posterior.

        :param dict params: Current parameters of model and autoguide.
            The parameters can be obtained using :meth:`~numpyro.infer.svi.SVI.get_params`
            method from :class:`~numpyro.infer.svi.SVI`.
        :return: the transform of the posterior distribution
        :rtype: :class:`~numpyro.distributions.transforms.Transform`
        """
        posterior = handlers.substitute(self._get_posterior, params)()
        assert isinstance(
            posterior, dist.TransformedDistribution
        ), "posterior is not a transformed distribution"
        if len(posterior.transforms) > 1:
            return ComposeTransform(posterior.transforms)
        else:
            return posterior.transforms[0]
Example #7
def _init_to_value(site, values={}, skip_param=False):
    if site['type'] == 'sample' and not site['is_observed']:
        if site['name'] not in values:
            return _init_to_uniform(site, skip_param=skip_param)

        value = values[site['name']]
        if isinstance(site['fn'], dist.TransformedDistribution):
            value = ComposeTransform(site['fn'].transforms).inv(value)
        return value

    if site['type'] == 'param' and not skip_param:
        # return base value of param site
        constraint = site['kwargs'].pop('constraint', real)
        transform = biject_to(constraint)
        value = site['args'][0]
        if isinstance(transform, ComposeTransform):
            base_transform = transform.parts[0]
            value = base_transform(transform.inv(value))
        return value
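
A hedged usage sketch of the public wrapper (assuming a recent NumPyro where it is exposed as numpyro.infer.init_to_value; the model is illustrative): user-supplied values are pulled back through the distribution's transforms, as the helper above does for TransformedDistribution sites.

from jax import random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS, init_to_value

def model():
    numpyro.sample("x", dist.LogNormal(0.0, 1.0))

# Start sampling from x = 1.0 rather than from a random initial point.
kernel = NUTS(model, init_strategy=init_to_value(values={"x": 1.0}))
mcmc = MCMC(kernel, num_warmup=100, num_samples=100)
mcmc.run(random.PRNGKey(0))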
Example #8
 def process_message(self, msg):
     if self.param_map is not None:
         if msg['name'] in self.param_map:
             msg['value'] = self.param_map[msg['name']]
     else:
         base_value = self.substitute_fn(msg) if self.substitute_fn \
             else self.base_param_map.get(msg['name'], None)
         if base_value is not None:
             if msg['type'] == 'sample':
                 msg['value'], msg['intermediates'] = msg['fn'].transform_with_intermediates(
                     base_value)
             else:
                 constraint = msg['kwargs'].pop('constraint', real)
                 transform = biject_to(constraint)
                 if isinstance(transform, ComposeTransform):
                     # No need to apply the first transform since the base value
                     # should have the same support as the first part's co-domain.
                     msg['value'] = ComposeTransform(transform.parts[1:])(base_value)
                 else:
                     msg['value'] = base_value
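
A hedged usage sketch (recent NumPyro exposes this via the data keyword of handlers.substitute; older releases used param_map/base_param_map as in the handler above): substituting a value means the site is not sampled.

import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro import handlers

def model():
    return numpyro.sample("x", dist.Normal(0.0, 1.0))

# The substituted value is returned instead of a random draw; the seed handler
# is only needed for any remaining (non-substituted) sample sites.
substituted = handlers.seed(handlers.substitute(model, data={"x": jnp.array(2.0)}), rng_seed=0)
assert substituted() == 2.0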
Example #9
 def get_transform(self, params):
     return ComposeTransform([
         self._get_transform(params),
         UnpackTransform(self._unpack_latent)
     ])