Example #1
    def variational(self, x, z=None, n_z=None):
        """
        Derive an instance of :math:`q(z|h(x))`, the variational net.

        Args:
            x: The observation `x` for the variational net.
            z: If specified, observe `z` in the variational net.
                (default :obj:`None`)
            n_z: The number of `z` samples to take for each `x`, if `z`
                is not observed. (default :obj:`None`, one sample for
                each `x`, without dedicated sampling dimension)

                It is recommended to specify this argument even if `z`
                is observed, to make explicit how many samples there are
                in the observation.

        Returns:
            BayesianNet: The variational net.
        """
        observed = {}
        if z is not None:
            observed['z'] = z
        net = BayesianNet(observed=observed)
        with tf.variable_scope('h_for_q_z'):
            z_params = self.h_for_q_z(x)
        with tf.variable_scope('q_z_given_x'):
            q_z_given_x = self.q_z_given_x(z_params)
            assert isinstance(q_z_given_x, Distribution)
        with tf.name_scope('z'):
            z = net.add('z',
                        q_z_given_x,
                        n_samples=n_z,
                        group_ndims=self.z_group_ndims,
                        is_reparameterized=self.is_reparameterized)
        return net
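A minimal usage sketch of the method above (hypothetical names: `model` is an instance of the class defining `variational`, `input_x` is a placeholder for a batch of observations, and the shape is assumed):

input_x = tf.placeholder(tf.float32, shape=[None, 784], name='input_x')
q_net = model.variational(input_x, n_z=10)  # q(z|h(x)), 10 z samples per x
z_samples = q_net['z']                      # the StochasticTensor for z
z_log_prob = q_net.local_log_prob('z')      # log q(z|x) of the samples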
Example #2
    def test_local_log_prob(self):
        x_observed = np.arange(24, dtype=np.float32).reshape([2, 3, 4])
        net = BayesianNet({'x': x_observed})
        normal = Normal(tf.zeros([3, 4]), tf.ones([3, 4]))
        x = net.add('x', normal)
        y = net.add('y', normal)

        # test single query
        x_log_prob = net.local_log_prob('x')
        self.assertIsInstance(x_log_prob, tf.Tensor)
        with self.test_session():
            np.testing.assert_allclose(x_log_prob.eval(),
                                       normal.log_prob(x_observed).eval())

        # test multiple queries
        x_log_prob, y_log_prob = net.local_log_probs(iter(['x', 'y']))
        self.assertIsInstance(x_log_prob, tf.Tensor)
        self.assertIsInstance(y_log_prob, tf.Tensor)
        with self.test_session() as sess:
            np.testing.assert_allclose(x_log_prob.eval(),
                                       normal.log_prob(x_observed).eval())
            x_log_prob_val, x_log_prob_res, y_log_prob_val, y_log_prob_res = \
                sess.run([
                    x_log_prob, normal.log_prob(x.tensor),
                    y_log_prob, normal.log_prob(y.tensor),
                ])
            np.testing.assert_allclose(x_log_prob_val, x_log_prob_res)
            np.testing.assert_allclose(y_log_prob_val, y_log_prob_res)
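The per-variable log-densities can be combined into a joint log-probability; a minimal sketch reusing the names from this test:

# Plain `sum` broadcasts the differently-shaped per-variable log-densities.
log_probs = net.local_log_probs(['x', 'y'])
log_joint = sum(log_probs)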
Example #3
def p_net(observed=None, n_z=None, is_training=True):
    logging.info('p_net builder: %r', locals())

    net = BayesianNet(observed=observed)

    # sample z ~ p(z)
    z = net.add('z',
                Normal(mean=tf.zeros([1, config.z_dim]),
                       logstd=tf.zeros([1, config.z_dim])),
                group_ndims=1,
                n_samples=n_z)

    # compute the hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        h_x, s1, s2 = flatten(z, 2)
        h_x = dense(h_x, 500)
        h_x = dense(h_x, 500)

    # sample x ~ p(x|z)
    x_logits = unflatten(dense(h_x, config.x_dim, name='x_logits'), s1, s2)
    x = net.add('x', Bernoulli(logits=x_logits), group_ndims=1)

    return net
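A minimal sketch of drawing unconditional samples from this generative net (assumes `config` is set up and the variables have been initialized):

sample_net = p_net(n_z=16)   # no observations: z ~ p(z), then x ~ p(x|z)
x_samples = sample_net['x']  # Bernoulli samples, shape (16, 1, config.x_dim)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    images = sess.run(x_samples)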
Example #4
    def model(self, z=None, x=None, x_feature=None, n_z=None, n_x=None):
        """
        Derive an instance of :math:`p(x|h(z))`, the model net.

        Args:
            z: If specified, observe `z` in the model net. (default :obj:`None`)
            x: If specified, observe `x` in the model net. (default :obj:`None`)
            x_feature: If specified, observe `x_feature` in the model net.
                (default :obj:`None`)
            n_z: The number of `z` samples to take for each `x`, if `z`
                is not observed. (default :obj:`None`, one `z` sample for
                each `x`, without dedicated sampling dimension)

                It is recommended to specify this argument even if `z`
                is observed, to make explicit how many samples there are
                in the observation.
            n_x: The number of `x` samples to take for each `z`, if `x`
                is not observed. (default :obj:`None`, one `x` sample for
                each `z`, without dedicated sampling dimension)

                It is recommended to specify this argument even if `x`
                is observed, to make explicit how many samples there are
                in the observation.

        Returns:
            BayesianNet: The model net.
        """
        # 'x':(?,100,38); 'z':(?,100,3)
        observed = {
            k: v
            for k, v in [('z', z), ('x', x), ('x_feature', x_feature)]
            if v is not None
        }
        net = BayesianNet(
            observed=observed)  # an empty net, with 'x', 'z', 'x_feature' taken as observations
        with tf.name_scope('z'):
            # 'z' is already observed: the observation directly becomes the output tensor (?,100,3),
            # and self._stochastic_tensors['z'] is updated with it
            z = net.add('z',
                        self.p_z,
                        n_samples=n_z,
                        group_ndims=self.z_group_ndims,
                        is_reparameterized=self.is_reparameterized)
        with tf.variable_scope('h_for_p_x'):
            # x_params:{'mean':(?,100,38),'std':(?,100,38)}
            # if self.with_conditional:
            #     if n_z is not None:
            #         x_feature = tf.tile(tf.expand_dims(x_feature, axis=0), [n_z, 1, 1, 1])
            #     x_params = self.h_for_p_x(tf.concat([z, x_feature], axis=-1))
            # else:
            #     x_params = self.h_for_p_x(z)
            x_params = self.h_for_p_x(z)
        with tf.variable_scope('p_x_given_z'):
            p_x_given_z = self.p_x_given_z(**x_params)
            assert isinstance(p_x_given_z, Distribution)
        with tf.name_scope('x'):
            x = net.add('x',
                        p_x_given_z,
                        n_samples=n_x,
                        group_ndims=self.x_group_ndims)
        return net
Example #5
def q_net(x, observed=None, n_samples=None, tau=None, is_training=True):
    use_concrete = config.use_concrete_distribution and tau is not None
    logging.info('q_net builder: %r', locals())

    net = BayesianNet(observed=observed)

    # compute the hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        h_x = tf.to_float(x)
        h_x = dense(h_x, 500)
        h_x = dense(h_x, 500)

    # sample y ~ q(y|x)
    y_logits = dense(h_x, config.n_clusters, name='y_logits')
    if use_concrete:
        y = net.add('y',
                    ExpConcrete(tau, y_logits),
                    is_reparameterized=True,
                    n_samples=n_samples)
        y_one_hot = tf.exp(y)
    else:
        y = net.add('y', Categorical(y_logits), n_samples=n_samples)
        y_one_hot = tf.one_hot(y, config.n_clusters, dtype=tf.float32)

    # sample z ~ q(z|y,x)
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        if config.mean_field_assumption_for_q:
            # by the mean-field assumption we let q(z|y,x) = q(z|x)
            h_z, s1, s2 = flatten(h_x, 2)
            z_n_samples = n_samples
        else:
            if n_samples is not None:
                h_z = tf.concat(
                    [tf.tile(tf.reshape(h_x, [1, -1, 500]),
                             tf.stack([n_samples, 1, 1])),
                     y_one_hot],
                    axis=-1)
            else:
                h_z = tf.concat([h_x, y_one_hot], axis=-1)
            h_z, s1, s2 = flatten(h_z, 2)
            h_z = dense(h_z, 500)
            z_n_samples = None

    z_mean = dense(h_z, config.z_dim, name='z_mean')
    z_logstd = dense(h_z, config.z_dim, name='z_logstd')
    z = net.add('z',
                Normal(mean=unflatten(z_mean, s1, s2),
                       logstd=unflatten(z_logstd, s1, s2),
                       is_reparameterized=use_concrete),
                n_samples=z_n_samples,
                group_ndims=1)

    return net
Example #6
    def test_add(self):
        x_observed = np.arange(24, dtype=np.float32).reshape([2, 3, 4])
        net = BayesianNet({'x': x_observed})
        self.assertNotIn('x', net)
        self.assertNotIn('y', net)

        # add an observed node
        x = net.add('x',
                    Normal(tf.zeros([3, 4]), tf.ones([3, 4])),
                    n_samples=2,
                    group_ndims=1,
                    is_reparameterized=False)
        self.assertIs(net.get('x'), x)
        self.assertIs(net['x'], x)
        self.assertIn('x', net)
        self.assertListEqual(list(net), ['x'])

        self.assertIsInstance(x, StochasticTensor)
        self.assertEqual(x.n_samples, 2)
        self.assertEqual(x.group_ndims, 1)
        self.assertEqual(x.is_reparameterized, False)
        with self.test_session():
            np.testing.assert_allclose(x.eval(), x_observed)
            np.testing.assert_equal(tf.shape(x).eval(), [2, 3, 4])

        # add an unobserved node
        y = net.add('y',
                    Normal(tf.zeros([3, 4]), tf.ones([3, 4])),
                    n_samples=2,
                    group_ndims=1,
                    is_reparameterized=False)
        self.assertIs(net.get('y'), y)
        self.assertIs(net['y'], y)
        self.assertIn('y', net)
        self.assertListEqual(list(net), ['x', 'y'])

        self.assertIsInstance(y, StochasticTensor)
        self.assertEqual(y.n_samples, 2)
        self.assertEqual(y.group_ndims, 1)
        self.assertEqual(y.is_reparameterized, False)
        with self.test_session():
            np.testing.assert_equal(tf.shape(y).eval(), [2, 3, 4])

        # error adding non-string name
        with pytest.raises(TypeError, match='`name` must be a str'):
            _ = net.add(1, Normal(0., 1.))

        # error adding the same variable
        with pytest.raises(
                KeyError,
                match='StochasticTensor with name \'x\' already exists '
                'in the BayesianNet.  Names must be unique.'):
            _ = net.add('x', Normal(2., 3.))

        # default is_reparameterized of Normal
        z = net.add('z', Normal(0., 1.))
        self.assertTrue(z.is_reparameterized)
Example #7
    def test_observed_dict(self):
        # test no observations
        net = BayesianNet()
        self.assertEqual(net.observed, {})
        # `net.observed` should be read-only
        with pytest.raises(Exception):
            net.observed['x'] = 1

        # test feeding observed with dict
        net = BayesianNet({'x': 1, 'y': 2})
        self.assertEqual(set(net.observed), {'x', 'y'})
        with self.test_session() as sess:
            self.assertListEqual(
                sess.run([net.observed['x'], net.observed['y']]), [1, 2])
Example #8
    def test_add_transform(self):
        class PatchedNormal(Normal):
            def sample(self,
                       n_samples=None,
                       group_ndims=0,
                       is_reparameterized=None,
                       name=None):
                return StochasticTensor(
                    self,
                    x_samples,
                    n_samples=n_samples,
                    group_ndims=group_ndims,
                    is_reparameterized=is_reparameterized,
                )

        net = BayesianNet({'w': tf.constant(0.)})
        x_samples = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])
        normal = PatchedNormal(tf.zeros([3, 4]), tf.ones([3, 4]))

        # test success call
        x = net.add('x',
                    normal,
                    n_samples=2,
                    group_ndims=1,
                    transform=lambda x, log_p: (x * 2., log_p * .5))
        self.assertIsInstance(x.distribution, TransformedDistribution)
        self.assertEqual(1, x.group_ndims)
        self.assertEqual(2, x.n_samples)
        self.assertTrue(x.is_reparameterized)

        with self.test_session() as sess:
            np.testing.assert_allclose(*sess.run([x_samples * 2., x]))
            np.testing.assert_allclose(*sess.run(
                [normal.log_prob(x_samples, group_ndims=1) * .5,
                 x.log_prob()]))

        # test errors
        I = lambda x, log_p: (x, log_p)
        with pytest.raises(TypeError,
                           match='Cannot add `TransformedDistribution`'):
            _ = net.add('y', x.distribution)
        with pytest.raises(ValueError,
                           match='`transform` can only be applied on '
                           'continuous, re-parameterized variables'):
            _ = net.add('y', Categorical([0.], dtype=tf.int32), transform=I)
        with pytest.raises(ValueError,
                           match='`transform` can only be applied on '
                           'continuous, re-parameterized variables'):
            _ = net.add('y',
                        ExpConcrete(.5, [0.], is_reparameterized=False),
                        transform=I)
        with pytest.raises(ValueError,
                           match='`observed` variable cannot be transformed.'):
            _ = net.add('w', Normal(mean=0., std=0.), transform=I)
        with pytest.raises(ValueError,
                           match='The transformed samples must be continuous'):
            T = lambda x, log_p: (tf.cast(x, dtype=tf.int32), log_p)
            _ = net.add('y', normal, transform=T)
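The `transform` in this test is a mock rather than a valid density transform. A density-consistent transform must apply the change-of-variables correction log q(y) = log q(x) - log|det df/dx|; a minimal sketch for the elementwise map y = 2x (hypothetical `double` helper; with `group_ndims=1` over a last axis of size 4, the grouped log-density drops by 4 * log 2):

double = lambda x, log_p: (x * 2., log_p - 4 * np.log(2.))
x2 = net.add('x2', normal, n_samples=2, group_ndims=1, transform=double)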
Example #9
def q_net(config, x, observed=None, n_z=None, is_training=True):
    net = BayesianNet(observed=observed)

    # compute the hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        h_x = tf.to_float(x)
        h_x = dense(h_x, 500)
        h_x = dense(h_x, 500)

    # sample z ~ q(z|x)
    z_logits = dense(h_x, config.z_dim, name='z_logits')
    z = net.add('z', Bernoulli(logits=z_logits), n_samples=n_z, group_ndims=1)

    return net
Example #10
    def test_validate_names(self):
        net = BayesianNet({'x': [2., 3., 4.]})
        x = net.add('x', Normal(0., 1.))
        y = net.add('y', Normal(0., 1.))

        for meth in ['output', 'local_log_prob']:
            with pytest.raises(TypeError, match='`names` is not a list of str'):
                _ = getattr(net, meth)(1)
            with pytest.raises(KeyError, match='StochasticTensor with name '
                                               '\'z\' does not exist'):
                _ = getattr(net, meth)('z')
        for meth in ['outputs', 'local_log_probs', 'query']:
            with pytest.raises(TypeError, match='`names` is not a list of str'):
                _ = getattr(net, meth)([1, 2])
            with pytest.raises(KeyError, match='StochasticTensor with name '
                                               '\'z\' does not exist'):
                _ = getattr(net, meth)(['x', 'y', 'z'])
Example #11
    def test_add_with_flow(self):
        normal = Normal(mean=tf.constant([0., 1., 2.]), std=1.)
        flow = QuadraticFlow(2., 5.)

        # test add with sample
        net = BayesianNet()
        x = net.add('x', normal, flow=flow)
        self.assertIsInstance(x.distribution, FlowDistribution)
        self.assertIs(x.distribution.flow, flow)

        # ensure non-invertible flow cannot be added with observed var
        class _Flow(BaseFlow):
            @property
            def explicitly_invertible(self):
                return False

        net = BayesianNet({'x': tf.zeros([5, 3])})
        with pytest.raises(TypeError,
                           match='The observed variable \'x\' expects `flow` '
                           'to be explicitly invertible, but it is not'):
            _ = net.add('x', normal, flow=_Flow(x_value_ndims=0))

        # test add observed with flow
        x = net.add('x', normal, flow=flow)
        self.assertIsInstance(x.distribution, FlowDistribution)
        self.assertIs(x.distribution.flow, flow)
Example #12
    def model(self, z=None, x=None, n_z=None, n_x=None):
        """
        Derive an instance of :math:`p(x|h(z))`, the model net.

        Args:
            z: If specified, observe `z` in the model net. (default :obj:`None`)
            x: If specified, observe `x` in the model net. (default :obj:`None`)
            n_z: The number of `z` samples to take for each `x`, if `z`
                is not observed. (default :obj:`None`, one `z` sample for
                each `x`, without dedicated sampling dimension)

                It is recommended to specify this argument even if `z`
                is observed, to make explicit how many samples there are
                in the observation.
            n_x: The number of `x` samples to take for each `z`, if `x`
                is not observed. (default :obj:`None`, one `x` sample for
                each `z`, without dedicated sampling dimension)

                It is recommended to specify this argument even if `x`
                is observed, to make explicit how many samples there are
                in the observation.

        Returns:
            BayesianNet: The variational net.
        """
        observed = {k: v for k, v in [('z', z), ('x', x)] if v is not None}
        net = BayesianNet(observed=observed)
        with tf.name_scope('z'):
            z = net.add('z',
                        self.p_z,
                        n_samples=n_z,
                        group_ndims=self.z_group_ndims,
                        is_reparameterized=self.is_reparameterized)
        with tf.variable_scope('h_for_p_x'):
            x_params = self.h_for_p_x(z)
        with tf.variable_scope('p_x_given_z'):
            p_x_given_z = self.p_x_given_z(**x_params)
            assert isinstance(p_x_given_z, Distribution)
        with tf.name_scope('x'):
            x = net.add('x',
                        p_x_given_z,
                        n_samples=n_x,
                        group_ndims=self.x_group_ndims)
        return net
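A minimal sketch (hypothetical `vae` instance, `input_x` placeholder, and `n_z` count) of wiring `variational` and `model` together for reconstruction: the latents sampled by the variational net are observed in the model net.

q_net = vae.variational(input_x, n_z=n_z)
p_net = vae.model(z=q_net['z'], x=input_x, n_z=n_z)
recons_log_prob = p_net.local_log_prob('x')  # log p(x|z) under the q samples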
Example #13
def q_net(config,
          x,
          observed=None,
          n_z=None,
          is_training=True,
          channels_last=False):
    net = BayesianNet(observed=observed)

    # compute the hidden features
    normalizer_fn = None if not config.batch_norm else functools.partial(
        batch_norm_2d,
        channels_last=channels_last,
        training=is_training,
    )
    dropout_fn = None if not config.dropout else functools.partial(
        tf.layers.dropout, training=is_training)

    with arg_scope([resnet_block],
                   shortcut_kernel_size=config.shortcut_kernel_size,
                   activation_fn=tf.nn.leaky_relu,
                   normalizer_fn=normalizer_fn,
                   dropout_fn=dropout_fn,
                   kernel_regularizer=l2_regularizer(config.l2_reg),
                   channels_last=channels_last):
        h_x = tf.to_float(x)
        h_x = tf.reshape(h_x,
                         [-1, 28, 28, 1] if channels_last else [-1, 1, 28, 28])
        h_x = resnet_block(h_x, 16)  # output: (16, 28, 28)
        h_x = resnet_block(h_x, 32, strides=2)  # output: (32, 14, 14)
        h_x = resnet_block(h_x, 32)  # output: (32, 14, 14)
        h_x = resnet_block(h_x, 64, strides=2)  # output: (64, 7, 7)
        h_x = resnet_block(h_x, 64)  # output: (64, 7, 7)
    h_x = reshape_conv2d_to_flat(h_x)

    # sample z ~ q(z|x)
    z_mean = dense(h_x, config.z_dim, name='z_mean')
    z_logstd = dense(h_x, config.z_dim, name='z_logstd')
    z = net.add('z',
                Normal(mean=z_mean, logstd=z_logstd),
                n_samples=n_z,
                group_ndims=1)

    return net
Example #14
    def variational(self, x, x_feature, z=None, n_z=None, posterior_flow=None):
        """
        Derive an instance of :math:`q(z|h(x))`, the variational net.

        Args:
            x: The observation `x` for the variational net.
            x_feature: The extra feature for the variational net, concatenated
                with `x` when `self.with_conditional` is set.
            z: If specified, observe `z` in the variational net.
                (default :obj:`None`)
            n_z: The number of `z` samples to take for each `x`, if `z`
                is not observed. (default :obj:`None`, one sample for
                each `x`, without dedicated sampling dimension)

                It is recommended to specify this argument even if `z`
                is observed, to make explicit how many samples there are
                in the observation.
            posterior_flow: If specified, a flow to transform the samples
                of q(z|x). (default :obj:`None`)

        Returns:
            BayesianNet: The variational net.
        """
        observed = {}
        if z is not None:
            observed['z'] = z
        net = BayesianNet(observed=observed)
        with tf.variable_scope('h_for_q_z'):
            if self.with_conditional:
                z_params = self.h_for_q_z(tf.concat([x, x_feature], axis=-1))
            else:
                z_params = self.h_for_q_z(x)
            # RNN output `input_q`, shape: batch * window * rnn_num_hidden
            # x: (?,100,38); z_params: {'input_q': tensor of shape (?,100,500)}
        with tf.variable_scope('q_z_given_x'):
            q_z_given_x = self.q_z_given_x(
                **z_params)  # input_q plus mean/std, fed to the distribution to obtain an instance
            assert isinstance(q_z_given_x, Distribution)
        with tf.name_scope('z'):
            z = net.add(
                'z',
                q_z_given_x,
                n_samples=n_z,  # sampled z: batch_size * time_step * z_dim
                group_ndims=self.z_group_ndims,
                is_reparameterized=self.is_reparameterized,
                flow=posterior_flow)  # TODO
        return net
Example #15
def q_net(config, x, observed=None, n_z=None, is_training=True):
    net = BayesianNet(observed=observed)

    # compute the hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        h_x = tf.to_float(x)
        h_x = dense(h_x, 500)
        h_x = dense(h_x, 500)

    # sample z ~ q(z|x)
    z_mean = dense(h_x, config.z_dim, name='z_mean')
    z_logstd = dense(h_x, config.z_dim, name='z_logstd')
    z = net.add('z',
                Normal(mean=z_mean, logstd=z_logstd),
                n_samples=n_z,
                group_ndims=1,
                flow=posterior_flow())  # `posterior_flow`: a flow factory defined elsewhere in the module

    return net
Example #16
def p_net(config,
          observed=None,
          n_z=None,
          is_training=True,
          channels_last=False):
    net = BayesianNet(observed=observed)

    # sample z ~ p(z)
    z = net.add('z',
                Normal(mean=tf.zeros([1, config.z_dim]),
                       logstd=tf.zeros([1, config.z_dim])),
                group_ndims=1,
                n_samples=n_z)

    # compute the hidden features
    with arg_scope([deconv_resnet_block],
                   shortcut_kernel_size=config.shortcut_kernel_size,
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg),
                   channels_last=channels_last):
        h_z, s1, s2 = flatten(z, 2)
        h_z = tf.reshape(dense(h_z, 64 * 7 * 7),
                         [-1, 7, 7, 64] if channels_last else [-1, 64, 7, 7])
        h_z = deconv_resnet_block(h_z, 64)  # output: (64, 7, 7)
        h_z = deconv_resnet_block(h_z, 32, strides=2)  # output: (32, 14, 14)
        h_z = deconv_resnet_block(h_z, 32)  # output: (32, 14, 14)
        h_z = deconv_resnet_block(h_z, 16, strides=2)  # output: (16, 28, 28)
    h_z = conv2d(h_z,
                 1, (1, 1),
                 padding='same',
                 name='feature_map_to_pixel',
                 channels_last=channels_last)  # output: (1, 28, 28)
    h_z = tf.reshape(h_z, [-1, config.x_dim])

    # sample x ~ p(x|z)
    x_logits = unflatten(h_z, s1, s2)
    x = net.add('x', Bernoulli(logits=x_logits), group_ndims=1)

    return net
Example #17
    def test_query(self):
        x_observed = np.arange(24, dtype=np.float32).reshape([2, 3, 4])
        net = BayesianNet({'x': x_observed})
        normal = Normal(tf.zeros([3, 4]), tf.ones([3, 4]))
        x = net.add('x', normal)
        y = net.add('y', normal)

        [(x_out, x_log_prob), (y_out, y_log_prob)] = net.query(iter(['x', 'y']))
        for o in [x_out, x_log_prob, y_out, y_log_prob]:
            self.assertIsInstance(o, tf.Tensor)
        self.assertIs(x_out, x.tensor)
        self.assertIs(y_out, y.tensor)
        with self.test_session() as sess:
            np.testing.assert_allclose(
                x_log_prob.eval(), normal.log_prob(x_observed).eval())
            x_log_prob_val, x_log_prob_res, y_log_prob_val, y_log_prob_res = \
                sess.run([
                    x_log_prob, normal.log_prob(x.tensor),
                    y_log_prob, normal.log_prob(y.tensor),
                ])
            np.testing.assert_allclose(x_log_prob_val, x_log_prob_res)
            np.testing.assert_allclose(y_log_prob_val, y_log_prob_res)
Example #18
    def test_outputs(self):
        x_observed = np.arange(24, dtype=np.float32).reshape([2, 3, 4])
        net = BayesianNet({'x': x_observed})
        normal = Normal(tf.zeros([3, 4]), tf.ones([3, 4]))
        x = net.add('x', normal)
        y = net.add('y', normal)

        # test single query
        x_out = net.output('x')
        self.assertIs(x_out, x.tensor)
        self.assertIsInstance(x_out, tf.Tensor)
        with self.test_session():
            np.testing.assert_equal(x_out.eval(), x_observed)

        # test multiple queries
        x_out, y_out = net.outputs(iter(['x', 'y']))
        self.assertIs(x_out, x.tensor)
        self.assertIs(y_out, y.tensor)
        self.assertIsInstance(x_out, tf.Tensor)
        self.assertIsInstance(y_out, tf.Tensor)
        with self.test_session():
            np.testing.assert_equal(x_out.eval(), x_observed)
Example #19
def q_net(x, observed=None, n_z=None, is_training=True):
    logging.info('q_net builder: %r', locals())

    net = BayesianNet(observed=observed)

    # compute the hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        h_z = tf.to_float(x)
        h_z = dense(h_z, 500)
        h_z = dense(h_z, 500)

    # sample z ~ q(z|x)
    z_mean = tf.layers.dense(h_z, config.z_dim, name='z_mean')
    z_logstd = tf.layers.dense(h_z, config.z_dim, name='z_logstd')
    z = net.add('z',
                Normal(mean=z_mean, logstd=z_logstd),
                n_samples=n_z,
                group_ndims=1,
                transform=posterior_flow)  # `posterior_flow`: an (x, log_p) -> (x', log_p') callable defined elsewhere

    return net
Example #20
def p_net(config, observed=None, n_z=None, is_training=True):
    net = BayesianNet(observed=observed)

    # sample z ~ p(z)
    z = net.add('z',
                Bernoulli(tf.zeros([1, config.z_dim])),
                group_ndims=1,
                n_samples=n_z)

    # compute the hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        z = tf.to_float(z)
        h_z, s1, s2 = flatten(z, 2)
        h_z = dense(h_z, 500)
        h_z = dense(h_z, 500)

    # sample x ~ p(x|z)
    x_logits = unflatten(dense(h_z, config.x_dim, name='x_logits'), s1, s2)
    x = net.add('x', Bernoulli(logits=x_logits), group_ndims=1)

    return net
Example #21
def p_net(config,
          observed=None,
          n_y=None,
          n_z=None,
          is_training=True,
          n_samples=None):
    if n_samples is not None:
        warnings.warn('`n_samples` is deprecated, use `n_y` instead.')
        n_y = n_samples

    net = BayesianNet(observed=observed)

    # sample y
    y = net.add('y',
                Categorical(tf.zeros([1, config.n_clusters])),
                n_samples=n_y)

    # sample z ~ p(z|y)
    z = net.add('z',
                gaussian_mixture_prior(y, config.z_dim, config.n_clusters),
                group_ndims=1,
                n_samples=n_z,
                is_reparameterized=False)

    # compute the hidden features for x
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        h_x, s1, s2 = flatten(z, 2)
        h_x = dense(h_x, 500)
        h_x = dense(h_x, 500)

    # sample x ~ p(x|z)
    x_logits = unflatten(dense(h_x, config.x_dim, name='x_logits'), s1, s2)
    x = net.add('x', Bernoulli(logits=x_logits), group_ndims=1)

    return net
Example #22
    def test_add_is_reparameterized_arg(self):
        with self.test_session() as sess:
            normal = Normal(mean=0., std=1.)

            # test is_reparameterized: False
            with mock.patch('tensorflow.stop_gradient',
                            Mock(wraps=tf.stop_gradient)) as m:
                x = normal.sample(5, is_reparameterized=True)
                self.assertTrue(x.is_reparameterized)
                net = BayesianNet({'x': x.tensor})
                t = net.add('x',
                            Normal(mean=1., std=2.),
                            n_samples=5,
                            is_reparameterized=False)
                self.assertFalse(t.is_reparameterized)
            self.assertEqual(m.call_count, 1)
            self.assertIs(m.call_args[0][0], x.tensor)

            # test inherit is_reparameterized from `x`
            x = normal.sample(5, is_reparameterized=True)
            self.assertTrue(x.is_reparameterized)
            net = BayesianNet({'x': x})
            t = net.add('x', Normal(mean=1., std=2.), n_samples=5)
            self.assertEqual(t.is_reparameterized, x.is_reparameterized)
            np.testing.assert_allclose(*sess.run([x, t]))

            x = normal.sample(5, is_reparameterized=False)
            self.assertFalse(x.is_reparameterized)
            net = BayesianNet({'x': x})
            t = net.add('x', Normal(mean=1., std=2.), n_samples=5)
            self.assertEqual(t.is_reparameterized, x.is_reparameterized)
            np.testing.assert_allclose(*sess.run([x, t]))

            # test override is_reparameterized: True -> False
            with mock.patch('tensorflow.stop_gradient',
                            Mock(wraps=tf.stop_gradient)) as m:
                x = normal.sample(5, is_reparameterized=True)
                self.assertTrue(x.is_reparameterized)
                net = BayesianNet({'x': x})
                t = net.add('x',
                            Normal(mean=1., std=2.),
                            n_samples=5,
                            is_reparameterized=False)
                self.assertFalse(t.is_reparameterized)
            self.assertEqual(m.call_count, 1)
            self.assertIs(m.call_args[0][0], x)

            # test cannot override is_reparameterized: False -> True
            x = normal.sample(5, is_reparameterized=False)
            self.assertFalse(x.is_reparameterized)
            net = BayesianNet({'x': x})
            with pytest.raises(ValueError,
                               match='`is_reparameterized` is True, but the '
                               'observation for `x` is not '
                               're-parameterized'):
                _ = net.add('x',
                            Normal(mean=1., std=2.),
                            n_samples=5,
                            is_reparameterized=True)
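A minimal sketch of the semantics verified above: adding a variable with `is_reparameterized=False` over a re-parameterized observation routes the observation through `tf.stop_gradient`, preserving values while blocking gradients into the sampling path (the reverse override, False to True, is rejected):

x = Normal(mean=0., std=1.).sample(5, is_reparameterized=True)
net = BayesianNet({'x': x})
t = net.add('x', Normal(mean=1., std=2.), n_samples=5,
            is_reparameterized=False)
# t equals x in value, but behaves like tf.stop_gradient(x) for gradients.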
Example #23
def model_builder(observed):
    model = BayesianNet(observed)
    z = model.add('z', Normal([0.], [1.]))
    y = model.add('y', Normal([0.], [2.]))
    x = model.add('x', Normal(z + y, [1.]))
    return model
Example #24
    def test_variational_chain(self):
        q_net = BayesianNet({'x': [1.]})
        q_net.add('z', Normal(q_net.observed['x'], 1.))
        q_net.add('y', Normal(q_net.observed['x'] * 2, 2.))

        def model_builder(observed):
            model = BayesianNet(observed)
            z = model.add('z', Normal([0.], [1.]))
            y = model.add('y', Normal([0.], [2.]))
            x = model.add('x', Normal(z + y, [1.]))
            return model

        model_builder = Mock(wraps=model_builder)

        # test chain with default parameters
        chain = q_net.variational_chain(model_builder)
        self.assertEqual(model_builder.call_args, (({
            'y': q_net['y'],
            'z': q_net['z']
        }, ), ))
        self.assertEqual(chain.latent_names, ('z', 'y'))
        self.assertIsNone(chain.latent_axis)

        # test chain with latent_names
        chain = q_net.variational_chain(model_builder, latent_names=['y'])
        self.assertEqual(model_builder.call_args, (({'y': q_net['y']}, ), ))
        self.assertEqual(chain.latent_names, ('y', ))

        # test chain with latent_axis
        chain = q_net.variational_chain(model_builder, latent_axis=-1)
        self.assertEqual(chain.latent_axis, -1)

        # test chain with observed
        chain = q_net.variational_chain(model_builder, observed=q_net.observed)
        self.assertEqual(model_builder.call_args, (({
            'x': q_net.observed['x'],
            'y': q_net['y'],
            'z': q_net['z']
        }, ), ))
        self.assertEqual(chain.latent_names, ('z', 'y'))

        # test model_builder with log_joint
        def model_builder_1(observed):
            return model_builder(observed), fake_log_joint

        fake_log_joint = tf.constant(0.)
        chain = q_net.variational_chain(model_builder_1)

        with self.test_session():
            np.testing.assert_equal(chain.log_joint.eval(),
                                    fake_log_joint.eval())
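From the chain above, an evidence lower bound can be assembled with quantities already shown in these examples, since ELBO = E_q[log p(x, z, y)] - E_q[log q(z, y|x)]. A minimal sketch using `chain.log_joint` and the variational net's local log-densities:

latent_log_probs = q_net.local_log_probs(['z', 'y'])
elbo = chain.log_joint - sum(latent_log_probs)
loss = -tf.reduce_mean(elbo)  # maximize the ELBO by minimizing its negation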