Example #1
    def log_prob(self, given, group_ndims=0, name=None):
        with tf.name_scope(name=name,
                           default_name=get_default_scope_name(
                               'log_prob', self)):
            given = self._distribution._check_input_shape(given)
            log_prob = self._distribution._log_prob(given)
            return reduce_group_ndims(tf.reduce_sum, log_prob, group_ndims)
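The `group_ndims` reduction above sums out the last `group_ndims` axes of the element-wise log-densities. A minimal NumPy sketch of that behavior (an illustrative re-implementation of tfsnippet's `reduce_group_ndims` for the `tf.reduce_sum` case, not the library code):

import numpy as np

def reduce_group_ndims_sketch(log_prob, group_ndims):
    # Sum over the last `group_ndims` axes, mirroring
    # reduce_group_ndims(tf.reduce_sum, log_prob, group_ndims).
    if group_ndims == 0:
        return log_prob
    return log_prob.sum(axis=tuple(range(-group_ndims, 0)))

log_p = np.log(np.full((4, 3, 2), 0.5))           # element-wise log-densities
print(reduce_group_ndims_sketch(log_p, 2).shape)  # (4,): last two axes summed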
Example #2
    def prob(self, given, group_ndims=0, name=None):
        with tf.name_scope(name,
                           default_name=get_default_scope_name('prob', self)):
            log_p = self.log_prob(given, group_ndims=group_ndims)
            p = tf.exp(log_p)
        return FlowDistributionDerivedTensor(tensor=p,
                                             flow_origin=log_p.flow_origin)
Example #3
    def __call__(self, x):
        y, _ = self.transform(x=x,
                              compute_y=True,
                              compute_log_det=False,
                              name=get_default_scope_name(
                                  camel_to_underscore(
                                      self.__class__.__name__)))
        return y
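Example #3 derives the default scope name from the class name via `camel_to_underscore`. A regex-based sketch of such a CamelCase-to-snake_case conversion (an illustrative stand-in, not tfsnippet's actual implementation):

import re

def camel_to_underscore_sketch(name):
    # Insert '_' before each capital that follows a lower-case
    # letter or a digit, then lower-case the whole string.
    return re.sub(r'(?<=[a-z0-9])(?=[A-Z])', '_', name).lower()

print(camel_to_underscore_sketch('PlanarNormalizingFlow'))
# -> planar_normalizing_flow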
Example #4
    def transform(self, x, compute_y=True, compute_log_det=True, name=None):
        """
        Transform `x` into `y`, and compute the log-determinant of `f` at `x`
        (i.e., :math:`\\log \\det \\frac{\\partial f(x)}{\\partial x}`).

        Args:
            x (Tensor): The samples of `x`.
            compute_y (bool): Whether or not to compute :math:`y = f(x)`?
                Default :obj:`True`.
            compute_log_det (bool): Whether or not to compute the
                log-determinant?  Default :obj:`True`.
            name (str): If specified, will use this name as the TensorFlow
                operational name scope.

        Returns:
            (tf.Tensor, tf.Tensor): `y` and the (maybe summed) log-determinant.
                The items in the returned tuple might be :obj:`None`
                if the corresponding `compute_?` argument is set to :obj:`False`.

        Raises:
            ValueError: If both `compute_y` and `compute_log_det` are set
                to :obj:`False`.
        """
        if not compute_y and not compute_log_det:
            raise ValueError('At least one of `compute_y` and '
                             '`compute_log_det` should be True.')

        x = tf.convert_to_tensor(x)
        if not self._has_built:
            self.build(x)

        x = self._x_input_spec.validate('x', x)

        with tf.name_scope(name,
                           default_name=get_default_scope_name(
                               'transform', self),
                           values=[x]):
            y, log_det = self._transform(x, compute_y, compute_log_det)

            if compute_log_det:
                with assert_deps([
                        assert_log_det_shape_matches_input(
                            log_det=log_det,
                            input=x,
                            value_ndims=self.x_value_ndims)
                ]) as asserted:
                    if asserted:  # pragma: no cover
                        log_det = tf.identity(log_det)

            if y is not None:
                maybe_add_histogram(y, 'y')
                y = maybe_check_numerics(y, 'y')

            if log_det is not None:
                maybe_add_histogram(log_det, 'log_det')
                log_det = maybe_check_numerics(log_det, 'log_det')

            return y, log_det
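To make the `transform` contract concrete, here is a minimal NumPy sketch of a hypothetical scale flow (`ScaleFlow` is not part of tfsnippet) that follows the same conventions: it returns `(y, log_det)` and yields None for whichever output was not requested:

import numpy as np

class ScaleFlow:
    """Hypothetical flow y = s * x, mirroring the `transform` contract."""

    def __init__(self, scale):
        self.scale = float(scale)

    def transform(self, x, compute_y=True, compute_log_det=True):
        if not compute_y and not compute_log_det:
            raise ValueError('At least one of `compute_y` and '
                             '`compute_log_det` should be True.')
        x = np.asarray(x, dtype=np.float64)
        y = self.scale * x if compute_y else None
        # df(x)/dx = s element-wise, so the per-element log-det is log|s|.
        log_det = (np.full(x.shape, np.log(abs(self.scale)))
                   if compute_log_det else None)
        return y, log_det

flow = ScaleFlow(2.0)
y, log_det = flow.transform([1.0, 2.0])                 # both outputs
y_only, none_det = flow.transform([1.0], compute_log_det=False)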
Example #5
    def inverse_transform(self,
                          y,
                          compute_x=True,
                          compute_log_det=True,
                          value_ndims=0,
                          name=None):
        """
        Transform `y` into `x`, and compute the log-determinant of `f^{-1}` at
        `y` (i.e.,
        :math:`\\log \\det \\frac{\\partial f^{-1}(y)}{\\partial y}`).

        Args:
            y (Tensor): The samples of `y`.
            compute_x (bool): Whether or not to compute :math:`x = f^{-1}(y)`?
                Default :obj:`True`.
            compute_log_det (bool): Whether or not to compute the
                log-determinant?  Default :obj:`True`.
            value_ndims (int): Number of value dimensions.
                `log_det.ndims == y.ndims - value_ndims`.
            name (str): If specified, will use this name as the TensorFlow
                operational name scope.

        Returns:
            (tf.Tensor, tf.Tensor): `x` and the (maybe summed) log-determinant.
                The items in the returned tuple might be :obj:`None`
                if the corresponding `compute_?` argument is set to :obj:`False`.

        Raises:
            ValueError: If both `compute_x` and `compute_log_det` are set
                to :obj:`False`.
            RuntimeError: If the flow is not explicitly invertible.
        """
        if not compute_x and not compute_log_det:
            raise ValueError('At least one of `compute_x` and '
                             '`compute_log_det` should be True.')

        value_ndims = int(value_ndims)
        if value_ndims < 0:
            raise ValueError(
                '`value_ndims` must be >= 0: got {}'.format(value_ndims))

        y = tf.convert_to_tensor(y)

        with tf.name_scope(name,
                           default_name=get_default_scope_name(
                               'inverse_transform', self),
                           values=[y]):
            x, log_det = self._inverse_transform(
                y=y, compute_x=compute_x, compute_log_det=compute_log_det)
            if log_det is not None and value_ndims > 0:
                log_det = tf.reduce_sum(log_det,
                                        axis=list(range(-value_ndims, 0)))

            return x, log_det
Example #6
    def transform(self,
                  x,
                  compute_y=True,
                  compute_log_det=True,
                  value_ndims=0,
                  name=None):
        """
        Transform `x` into `y`, and compute the log-determinant of `f` at `x`
        (i.e., :math:`\\log \\det \\frac{\\partial f(x)}{\\partial x}`).

        Args:
            x (Tensor): The samples of `x`.
            compute_y (bool): Whether or not to compute :math:`y = f(x)`?
                Default :obj:`True`.
            compute_log_det (bool): Whether or not to compute the
                log-determinant?  Default :obj:`True`.
            value_ndims (int): Number of value dimensions.
                `log_det.ndims == x.ndims - value_ndims`.
            name (str): If specified, will use this name as the TensorFlow
                operational name scope.

        Returns:
            (tf.Tensor, tf.Tensor): `y` and the (maybe summed) log-determinant.
                The items in the returned tuple might be :obj:`None`
                if the corresponding `compute_?` argument is set to :obj:`False`.

        Raises:
            ValueError: If both `compute_y` and `compute_log_det` are set
                to :obj:`False`.
        """
        if not compute_y and not compute_log_det:
            raise ValueError('At least one of `compute_y` and '
                             '`compute_log_det` should be True.')

        value_ndims = int(value_ndims)
        if value_ndims < 0:
            raise ValueError(
                '`value_ndims` must be >= 0: got {}'.format(value_ndims))

        x = tf.convert_to_tensor(x)

        with tf.name_scope(name,
                           default_name=get_default_scope_name(
                               'transform', self),
                           values=[x]):
            y, log_det = self._transform(x=x,
                                         compute_y=compute_y,
                                         compute_log_det=compute_log_det)
            if log_det is not None and value_ndims > 0:
                log_det = tf.reduce_sum(log_det,
                                        axis=list(range(-value_ndims, 0)))

            return y, log_det
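The `value_ndims` handling in Examples #5 and #6 is the same tail-axis summation pattern as `group_ndims` in Example #1: the per-element log-determinant is summed over the last `value_ndims` axes, so that `log_det.ndims == x.ndims - value_ndims` as documented. A quick NumPy check of that shape relation:

import numpy as np

x = np.zeros((8, 32, 32, 3))
value_ndims = 3
axis = list(range(-value_ndims, 0))          # [-3, -2, -1]
log_det = np.zeros(x.shape).sum(axis=tuple(axis))
assert log_det.ndim == x.ndim - value_ndims  # one log-det per sample: (8,)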
Example #7
    def prob(self, given, group_ndims=0, name=None):
        """
        Compute the densities of `given` against the distribution.

        Args:
            given (Tensor): The samples to be tested.
            group_ndims (int or tf.Tensor): If specified, the last `group_ndims`
                dimensions of the log-densities will be summed up. (default 0)
            name: TensorFlow name scope of the graph nodes.
                (default "prob").

        Returns:
            tf.Tensor: The densities of `given`.
        """
        with tf.name_scope(name,
                           default_name=get_default_scope_name('prob', self)):
            return tf.exp(self.log_prob(given, group_ndims=group_ndims))
Example #8
    def local_log_probs(self, names):
        """
        Get the log-densities of stochastic nodes.

        Args:
            names (Iterable[str]): Names of the queried stochastic nodes.

        Returns:
            list[tf.Tensor]: Log-densities of the queried stochastic nodes.

        Raises:
            KeyError: If a non-existent name is queried.
        """
        names = self._check_names_exist(names)
        ret = []
        for name in names:
            ns = '{}.log_prob'.format(get_default_scope_name(name))
            ret.append(self._stochastic_tensors[name].log_prob(name=ns))
        return ret
Example #9
    def apply(self, input):
        """
        Apply the layer on `input`, to produce output.

        Args:
            input (Tensor or list[Tensor]): The input tensor, or a list of
                input tensors.

        Returns:
            The output tensor, or a list of output tensors.
        """
        if is_tensor_object(input) or isinstance(input, np.ndarray):
            input = tf.convert_to_tensor(input)
            ns_values = [input]
        else:
            input = [tf.convert_to_tensor(i) for i in input]
            ns_values = input

        if not self._has_built:
            self.build(input)

        with tf.name_scope(get_default_scope_name('apply', self),
                           values=ns_values):
            return self._apply(input)
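`apply` accepts either a single tensor or a list of tensors and normalizes both cases before building. A pure-Python sketch of that dispatch (NumPy stands in for `tf.convert_to_tensor`; the branch mirrors the top of `apply`):

import numpy as np

def normalize_input(input):
    # Single array: convert once and wrap it for the name-scope values.
    if isinstance(input, np.ndarray):
        converted = np.asarray(input)
        return converted, [converted]
    # List of arrays: convert element-wise; the list itself is reused.
    converted = [np.asarray(i) for i in input]
    return converted, converted

single, ns1 = normalize_input(np.ones((2, 3)))         # ns1 == [single]
many, ns2 = normalize_input([np.ones(2), np.ones(3)])  # ns2 == many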
Example #10
    def inverse_transform(self,
                          y,
                          compute_x=True,
                          compute_log_det=True,
                          name=None):
        """
        Transform `y` into `x`, and compute the log-determinant of `f^{-1}` at
        `y` (i.e.,
        :math:`\\log \\det \\frac{\\partial f^{-1}(y)}{\\partial y}`).

        Args:
            y (Tensor): The samples of `y`.
            compute_x (bool): Whether or not to compute :math:`x = f^{-1}(y)`?
                Default :obj:`True`.
            compute_log_det (bool): Whether or not to compute the
                log-determinant?  Default :obj:`True`.
            name (str): If specified, will use this name as the TensorFlow
                operational name scope.

        Returns:
            (tf.Tensor, tf.Tensor): `x` and the (maybe summed) log-determinant.
                The items in the returned tuple might be :obj:`None`
                if the corresponding `compute_?` argument is set to :obj:`False`.

        Raises:
            ValueError: If both `compute_x` and `compute_log_det` are set
                to :obj:`False`.
            RuntimeError: If the flow is not explicitly invertible, or if
                it has not been built yet.
        """
        if not self.explicitly_invertible:
            raise RuntimeError(
                'The flow is not explicitly invertible: {!r}'.format(self))
        if not compute_x and not compute_log_det:
            raise ValueError('At least one of `compute_x` and '
                             '`compute_log_det` should be True.')
        if not self._has_built:
            raise RuntimeError('`inverse_transform` cannot be called before '
                               'the flow has been built; it can be built by '
                               'calling `build`, `apply` or `transform`: '
                               '{!r}'.format(self))

        y = tf.convert_to_tensor(y)
        y = self._y_input_spec.validate('y', y)

        with tf.name_scope(name,
                           default_name=get_default_scope_name(
                               'inverse_transform', self),
                           values=[y]):
            x, log_det = self._inverse_transform(y, compute_x, compute_log_det)

            if compute_log_det:
                with assert_deps([
                        assert_log_det_shape_matches_input(
                            log_det=log_det,
                            input=y,
                            value_ndims=self.y_value_ndims)
                ]) as asserted:
                    if asserted:  # pragma: no cover
                        log_det = tf.identity(log_det)

            return x, log_det
Example #11
def shifted_conv2d(input,
                   out_channels,
                   kernel_size,
                   spatial_shift,
                   strides=(1, 1),
                   channels_last=True,
                   conv_fn=conv2d,
                   name=None,
                   scope=None,
                   **kwargs):
    """
    2D convolution with shifted input.

    This function first pads `input` according to the `kernel_size` and
    `spatial_shift` arguments, then performs a 2D convolution (using
    `conv_fn`) with "VALID" padding.

    Args:
        input (Tensor): The input tensor, at least 4-d.
        out_channels (int): The channel numbers of the output.
        kernel_size (int or (int, int)): Kernel size over spatial dimensions.
        spatial_shift: The `spatial_shift` should be a tuple with two elements
            (corresponding to height and width spatial axes), and the elements
            can only be -1, 0 or 1.

            If the shift for a specific axis is `-1`, then `kernel_size - 1`
            zeros will be padded at the end of that axis.
            If the shift is `0`, then `(kernel_size - 1) // 2` zeros will be
            padded at the front, and `kernel_size // 2` zeros will be padded
            at the end of that axis.
            Otherwise if the shift is `1`, then `kernel_size - 1` zeros will
            be padded at the front of that axis.
        strides (int or (int, int)): Strides over spatial dimensions.
        channels_last (bool): Whether or not the channel axis is the last
            axis in `input`? (i.e., the data format is "NHWC")
        conv_fn: The 2D convolution function. (default :func:`conv2d`)
        name (str): Default name of the variable scope. Will be used only
            when `scope` is not specified.
        scope (str): The name of the variable scope.
        \\**kwargs: Other named parameters passed to `conv_fn`.

    Returns:
        tf.Tensor: The output tensor.
    """
    spatial_shift = tuple(spatial_shift)
    if len(spatial_shift) != 2 or \
            any(s not in (-1, 0, 1) for s in spatial_shift):
        raise TypeError('`spatial_shift` must be a tuple with two elements, '
                        'and the elements can only be -1, 0 or 1.')
    kernel_size = validate_conv2d_size_tuple('kernel_size', kernel_size)
    if 'padding' in kwargs:
        raise ValueError('`padding` argument is not supported.')
    input, _, _ = validate_conv2d_input(input, channels_last=channels_last)

    rank = len(get_static_shape(input))
    pads = [(0, 0)] * rank

    is_shifted_conv2d = False
    spatial_start = -3 if channels_last else -2
    for i, (ksize, shift) in enumerate(zip(kernel_size, spatial_shift)):
        axis = i + spatial_start
        if shift == 0:
            pads[axis] = ((ksize - 1) // 2, ksize // 2)
        elif shift == -1:
            pads[axis] = (0, ksize - 1)
            is_shifted_conv2d = True
        else:
            assert (shift == 1)
            pads[axis] = (ksize - 1, 0)
            is_shifted_conv2d = True

    # fast routine: no shift, use ordinary conv_fn with padding == 'SAME'
    if not is_shifted_conv2d:
        return conv_fn(input=input,
                       out_channels=out_channels,
                       kernel_size=kernel_size,
                       strides=strides,
                       channels_last=channels_last,
                       padding='SAME',
                       scope=scope,
                       name=name,
                       **kwargs)

    # slow routine: pad and use conv_fn with padding == 'VALID'
    with tf.variable_scope(scope, default_name=name or 'shifted_conv2d'):
        output = tf.pad(input, pads)
        output = conv_fn(input=output,
                         out_channels=out_channels,
                         kernel_size=kernel_size,
                         strides=strides,
                         channels_last=channels_last,
                         padding='VALID',
                         scope=get_default_scope_name(
                             getattr(conv_fn, '__name__', None) or 'conv_fn'),
                         **kwargs)
        return output
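The `spatial_shift` padding rules in the docstring map one-to-one onto the loop above. A standalone sketch that reproduces just the (front, end) pad arithmetic per spatial axis (illustrative only; it covers the padding, not the convolution itself):

def shift_pads(kernel_size, spatial_shift):
    # (front, end) zero-padding per spatial axis, as in shifted_conv2d.
    pads = []
    for ksize, shift in zip(kernel_size, spatial_shift):
        if shift == 0:
            pads.append(((ksize - 1) // 2, ksize // 2))  # "SAME"-like split
        elif shift == -1:
            pads.append((0, ksize - 1))                  # all padding at end
        else:  # shift == 1
            pads.append((ksize - 1, 0))                  # all padding at front
    return pads

print(shift_pads((3, 3), (1, 0)))  # [(2, 0), (1, 1)]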