Example #1
    def build(self):
        D = np.prod(self._event_dims)

        flow = []
        for i in range(self._num_coupling_layers):
            if self._use_batch_normalization:
                batch_normalization_bijector = bijectors.BatchNormalization()
                flow.append(batch_normalization_bijector)

            real_nvp_bijector = bijectors.RealNVP(
                num_masked=D // 2,
                shift_and_log_scale_fn=conditioned_real_nvp_template(
                    hidden_layers=self._hidden_layer_sizes,
                    # TODO: test tf.nn.relu
                    activation=tf.nn.tanh),
                name='real_nvp_{}'.format(i))

            flow.append(real_nvp_bijector)

            if i < self._num_coupling_layers - 1:
                permute_bijector = bijectors.Permute(
                    permutation=list(reversed(range(D))),
                    name='permute_{}'.format(i))
                # TODO(hartikainen): We need to force _is_constant_jacobian due
                # to the event_dim caching. See the issue filed at github:
                # https://github.com/tensorflow/probability/issues/122
                permute_bijector._is_constant_jacobian = False
                flow.append(permute_bijector)

        # Note: bijectors.Chain applies the list of bijectors in the
        # _reverse_ order of how they are given, hence the reversal here.
        self.flow = bijectors.Chain(list(reversed(flow)))
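
For context, a minimal sketch of how a coupling-layer list like the one above is typically wrapped into a trainable density with TensorFlow Probability. The event size, hidden layer sizes, and the use of tfb.real_nvp_default_template (standing in for the project's conditioned_real_nvp_template) are illustrative assumptions, not values from the snippet.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfb = tfp.bijectors

event_size = 4  # hypothetical event dimension
flow = [
    tfb.RealNVP(
        num_masked=event_size // 2,
        shift_and_log_scale_fn=tfb.real_nvp_default_template(
            hidden_layers=[32, 32], activation=tf.nn.tanh)),
]

# Chain applies its bijectors right-to-left, hence the reversal; the
# chained flow then reshapes a simple base distribution into the target.
distribution = tfd.TransformedDistribution(
    distribution=tfd.MultivariateNormalDiag(loc=tf.zeros(event_size)),
    bijector=tfb.Chain(list(reversed(flow))))

samples = distribution.sample(8)            # shape: [8, event_size]
log_probs = distribution.log_prob(samples)  # shape: [8]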
Example #2
File: flow.py Project: gumpfly/PSVO
    def init_bijectors(self, n_layers, hidden_layers):
        with tf.variable_scope(self.name):
            bijectors = []
            for i in range(n_layers):
                if self.flow_type == "MAF":
                    bijectors.append(
                        tfb.MaskedAutoregressiveFlow(
                            shift_and_log_scale_fn=(
                                tfb.masked_autoregressive_default_template(
                                    hidden_layers=hidden_layers,
                                    activation=tf.nn.relu,
                                    log_scale_min_clip=self.log_scale_min_clip,
                                    log_scale_max_clip=self.log_scale_max_clip,
                                    shift_only=self.shift_only,
                                    log_scale_clip_gradient=(
                                        self.log_scale_clip_gradient),
                                    name="MAF_template_{}".format(i))),
                            name="MAF_{}".format(i)))
                elif self.flow_type == "IAF":
                    bijectors.append(
                        tfb.Invert(
                            tfb.MaskedAutoregressiveFlow(
                                shift_and_log_scale_fn=(
                                    tfb.masked_autoregressive_default_template(
                                        hidden_layers=hidden_layers,
                                        activation=tf.nn.relu,
                                        log_scale_min_clip=self.log_scale_min_clip,
                                        log_scale_max_clip=self.log_scale_max_clip,
                                        shift_only=self.shift_only,
                                        log_scale_clip_gradient=(
                                            self.log_scale_clip_gradient),
                                        name="MAF_template_{}".format(i)))),
                            name="IAF_{}".format(i)))
                elif self.flow_type == "RealNVP":
                    bijectors.append(
                        tfb.RealNVP(
                            num_masked=self.event_size - 1,
                            shift_and_log_scale_fn=(
                                tfb.real_nvp_default_template(
                                    hidden_layers=hidden_layers,
                                    activation=tf.nn.relu,
                                    shift_only=self.shift_only,
                                    name="RealNVP_template_{}".format(i))),
                            name="RealNVP_{}".format(i)))
                else:
                    raise ValueError("Unknown flow type {}".format(
                        self.flow_type))
                bijectors.append(
                    tfb.Permute(permutation=list(range(1, self.event_size)) +
                                [0]))
                # bijectors.append(
                #     tfb.Permute(
                #         self.init_once(np.random.permutation(self.event_size).astype("int32"),
                #                        name="permutation_{}".format(i))
                #     )
                # )

            # Drop the trailing Permute (no permutation is needed after the
            # final layer), then reverse the list since tfb.Chain applies
            # its bijectors right-to-left.
            flow_bijector = tfb.Chain(list(reversed(bijectors[:-1])),
                                      validate_args=True,
                                      name="NF_chain")

            return flow_bijector
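
All three branches above rely on tfb.Chain composing right-to-left, which is why the list is reversed before chaining. A small hedged check of that ordering, assuming a TFP version that ships tfb.Scale:

import numpy as np
import tensorflow_probability as tfp

tfb = tfp.bijectors

# Chain([b2, b1]).forward(x) computes b2.forward(b1.forward(x)):
# the last bijector in the list is applied first.
chain = tfb.Chain([tfb.Exp(), tfb.Scale(2.0)])
x = np.array([0.5, 1.0], dtype=np.float32)
np.testing.assert_allclose(chain.forward(x), np.exp(2.0 * x), rtol=1e-6)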
Example #3
    def _build(self, input_shape):
        input_depth = tf.compat.dimension_value(
            tensorshape_util.with_rank_at_least(input_shape, 1)[-1])

        self._input_depth = input_depth

        flow_parts = []
        for i in range(self._num_coupling_layers):
            if self._use_batch_normalization:
                batch_normalization_bijector = bijectors.BatchNormalization()
                flow_parts += [batch_normalization_bijector]

            real_nvp_bijector = bijectors.RealNVP(
                num_masked=input_depth // 2,
                shift_and_log_scale_fn=feedforward_scale_and_log_diag_fn(
                    hidden_layer_sizes=self._hidden_layer_sizes,
                    activation=tf.nn.relu),
                name='real_nvp_{}'.format(i))
            flow_parts += [real_nvp_bijector]

            if i < self._num_coupling_layers - 1:
                permute_bijector = bijectors.Permute(
                    permutation=list(reversed(range(input_depth))),
                    name='permute_{}'.format(i))
                flow_parts += [permute_bijector]

        # bijectors.Chain applies the list of bijectors in the
        # _reverse_ order of how they are given, hence the [::-1].
        self.flow = bijectors.Chain(flow_parts[::-1])
        self._built = True
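
The Permute between coupling layers matters because each RealNVP layer passes half of its input through untouched; reversing the feature order ensures the copied half gets transformed by the next layer. A hedged illustration of that reversal (input_depth is an assumed value):

import numpy as np
import tensorflow_probability as tfp

tfb = tfp.bijectors

input_depth = 4  # hypothetical
permute = tfb.Permute(permutation=list(reversed(range(input_depth))))
x = np.arange(input_depth, dtype=np.float32)
print(permute.forward(x).numpy())                   # [3. 2. 1. 0.]
print(permute.inverse(permute.forward(x)).numpy())  # recovers [0. 1. 2. 3.]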
Example #4
    def _build(self, input_shape):
        input_depth = tf.compat.dimension_value(
            tensorshape_util.with_rank_at_least(input_shape, 1)[-1])

        self._input_depth = input_depth

        flow_parts = []
        for i in range(self._num_coupling_layers):
            if self._use_batch_normalization:
                # TODO(hartikainen): Allow other normalizations, e.g.
                # weight normalization?
                batch_normalization_bijector = bijectors.BatchNormalization()
                flow_parts += [batch_normalization_bijector]

            real_nvp_bijector = bijectors.RealNVP(
                # Alternate the sign so successive layers mask opposite
                # halves of the event; a negative fraction_masked counts
                # from the end, so no explicit Permute is needed.
                fraction_masked=(0.5 if i % 2 == 0 else -0.5),
                bijector_fn=FeedforwardBijectorFunction(
                    hidden_layer_sizes=self._hidden_layer_sizes,
                    activation=self._activation),
                name=f'real_nvp_{i}')
            flow_parts += [real_nvp_bijector]

        # bijectors.Chain applies the list of bijectors in the
        # _reverse_ order of how they are given, hence the [::-1].
        self.flow = bijectors.Chain(flow_parts[::-1])
        self._built = True
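
A sketch of the alternating-mask idea in isolation, assuming a TFP version where RealNVP accepts fraction_masked; the default template stands in for the snippet's FeedforwardBijectorFunction, and the layer count and sizes are illustrative:

import tensorflow_probability as tfp

tfb = tfp.bijectors

def coupling_layer(i):
    # Flipping the sign of fraction_masked alternates which half of the
    # event is held fixed, replacing the Permute used in examples 1-3.
    return tfb.RealNVP(
        fraction_masked=(0.5 if i % 2 == 0 else -0.5),
        shift_and_log_scale_fn=tfb.real_nvp_default_template(
            hidden_layers=[32, 32]))

flow = tfb.Chain([coupling_layer(i) for i in range(4)][::-1])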