Example #1
def bayesianNN(observed, x, n_x, layer_sizes, n_particles):
    with zs.BayesianNet(observed=observed) as model:

        ws = []
        for i, (n_in,
                n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
            w_mu = tf.zeros([1, n_out, n_in + 1])
            ws.append(
                zs.Normal('w' + str(i),
                          w_mu,
                          std=1.,
                          n_samples=n_particles,
                          group_ndims=2))

        # forward
        ly_x = tf.expand_dims(
            tf.tile(tf.expand_dims(x, 0), [n_particles, 1, 1]), 3)
        for i in range(len(ws)):
            w = tf.tile(ws[i], [1, tf.shape(x)[0], 1, 1])
            ly_x = tf.concat(
                [ly_x, tf.ones([n_particles, tf.shape(x)[0], 1, 1])], 2)
            ly_x = tf.matmul(w, ly_x) / tf.sqrt(tf.to_float(tf.shape(ly_x)[2]))
            if i < len(ws) - 1:
                ly_x = tf.nn.relu(ly_x)

        y_mean = tf.squeeze(ly_x, [2, 3])
        y_logstd = tf.get_variable('y_logstd',
                                   shape=[],
                                   initializer=tf.constant_initializer(0.))
        y = zs.Normal('y', y_mean, logstd=y_logstd)

    return model, y_mean
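A minimal usage sketch (not part of the original example): with ZhuSuan's legacy BayesianNet API, the returned model can be queried for local log probabilities to build the log joint used by SGVB-style training. Here x_ph, n_x, layer_sizes, n_particles, and the dataset size N are assumed to be defined in the surrounding script; in practice the model function is usually wrapped for variable reuse (e.g. zs.reuse) so it can be rebuilt repeatedly.

def log_joint(observed):
    # Rebuild the model with the given observations (weights and/or targets).
    model, _ = bayesianNN(observed, x_ph, n_x, layer_sizes, n_particles)
    w_names = ['w' + str(i) for i in range(len(layer_sizes) - 1)]
    log_pws = model.local_log_prob(w_names)    # prior terms, [n_particles, 1] each
    log_py_xw = model.local_log_prob('y')      # likelihood, [n_particles, batch_size]
    # Rescale the minibatch likelihood to the full dataset of size N.
    return tf.squeeze(tf.add_n(log_pws), 1) + tf.reduce_mean(log_py_xw, 1) * N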
Example #2
def vae(observed, n, n_z, n_particles):
    with zs.BayesianNet(observed=observed) as model:
        z_mean = tf.zeros([n, n_z])
        z = zs.Normal('z',
                      z_mean,
                      std=1.,
                      n_samples=n_particles,
                      group_ndims=1)
        # ngf (base number of deconv filters) and `layers` (tf.contrib.layers)
        # are assumed to be defined at module scope.
        lx_z = layers.fully_connected(z,
                                      num_outputs=ngf * 8 * 4 * 4,
                                      activation_fn=None)
        lx_z = tf.reshape(lx_z, [-1, 4, 4, ngf * 8])
        lx_z = layers.conv2d_transpose(lx_z, ngf * 4, 5, stride=2)
        lx_z = layers.conv2d_transpose(lx_z, ngf * 2, 5, stride=2)
        lx_z = layers.conv2d_transpose(lx_z, ngf, 5, stride=2)
        lx_z = layers.conv2d_transpose(lx_z,
                                       3,
                                       5,
                                       stride=2,
                                       activation_fn=tf.nn.sigmoid)
        x_mean = tf.reshape(lx_z, [-1, n, 64, 64, 3])
        x_logstd = tf.get_variable("h_logstd",
                                   shape=[64, 64, 3],
                                   dtype=tf.float32,
                                   initializer=tf.constant_initializer(0.))
        x = zs.Normal('x', x_mean, logstd=x_logstd, group_ndims=3)
    return model, x_mean
Example #3
def p_net(observed, input_data, market_encode, prev_z, prob, gen_mode=False):
    with zs.BayesianNet(observed=observed) as decoder:
        cat = tf.concat([prev_z, market_encode, input_data], 1)
        h_z = tf.layers.dropout(
            tf.layers.dense(inputs=cat, units=config.LATENT_SIZE,
                            activation=tf.nn.tanh, name='h_z_prior',
                            reuse=tf.AUTO_REUSE),
            rate=prob)
        z_mean = tf.layers.dropout(
            tf.layers.dense(inputs=h_z, units=config.LATENT_SIZE,
                            activation=None, name='z_mu_prior',
                            reuse=tf.AUTO_REUSE),
            rate=prob)
        z_logstd = tf.layers.dropout(
            tf.layers.dense(inputs=h_z, units=config.LATENT_SIZE,
                            activation=None, name='z_delta_prior',
                            reuse=tf.AUTO_REUSE),
            rate=prob)
        z = zs.Normal(name='z', mean=z_mean, logstd=z_logstd,
                      group_ndims=2, reuse=tf.AUTO_REUSE)
        p_z = zs.Normal(name='pz', mean=z_mean, logstd=z_logstd,
                        group_ndims=2, reuse=tf.AUTO_REUSE)
        if gen_mode:
            cat = tf.concat([input_data, market_encode, z_mean], 1)
        else:  # inference
            cat = tf.concat([input_data, market_encode, z], 1)

        g = tf.layers.dropout(
            tf.layers.dense(inputs=cat, units=config.LATENT_SIZE, name='g',
                            activation=tf.nn.tanh, reuse=tf.AUTO_REUSE),
            rate=prob)
        y = tf.layers.dropout(
            tf.layers.dense(inputs=g, units=2, activation=None, name='y_hat',
                            reuse=tf.AUTO_REUSE),
            rate=prob)

        return y, g, p_z
Example #4
def lntm(observed, n_chains, D, K, V, eta_mean, eta_logstd):
    with zs.BayesianNet(observed=observed) as model:
        eta_mean = tf.tile(tf.expand_dims(eta_mean, 0), [D, 1])
        eta = zs.Normal('eta', eta_mean, logstd=eta_logstd, n_samples=n_chains,
                        group_ndims=1)
        # log_delta: log std of the topic-word prior, assumed defined at module scope
        beta = zs.Normal('beta', tf.zeros([K, V]), logstd=log_delta,
                         group_ndims=1)
    return model
Example #5
 def model_func(observed):
     with zs.BayesianNet(observed) as net:
         z = zs.Normal('z',
                       mean=tf.zeros([3]),
                       std=tf.ones([3]),
                       n_samples=n_z)
         x = zs.Normal('x', mean=z, std=tf.ones([3]))
     return net
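A hypothetical usage note for this toy net (the session boilerplate below is an assumption, and n_z is assumed to be set in the enclosing scope): with nothing observed, building the net performs ancestral sampling, and BayesianNet.outputs returns the sample tensors of the named nodes.

net = model_func({})                       # observe nothing: ancestral sampling
z_samples, x_samples = net.outputs(['z', 'x'])
with tf.Session() as sess:
    print(sess.run([z_samples, x_samples]))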
Example #6
def vae(observed, n, n_x, n_z, n_samples, is_training):
    with zs.BayesianNet(observed=observed) as model:
        normalizer_params = {
            'is_training': is_training,
            'updates_collections': None
        }

        z_mean = tf.zeros([n, n_z])  # the mean of z is the zero vector
        # The covariance of z is the identity; a scalar std suffices because it is
        # broadcast to a vector and used as the diagonal of the covariance in ZhuSuan.
        z_std = 1.
        '''
        TODO1: sampling z using the Gaussian distribution of zhusuan
            > given input
                - z_mean, z_std, n_samples
                - set group_event_ndims as 1
            > e.g.
                - x = zs.Bernoulli('x', mu, group_event_ndims=1)
        '''

        z = zs.Normal('z',
                      z_mean,
                      std=z_std,
                      n_samples=n_samples,
                      group_event_ndims=1)

        lx_z = layers.fully_connected(
            z,
            500,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params
        )  # an MLP layer of 500 hidden units with z as the input
        '''
        TODO2: add one more mlp layer with 500 hidden units
            > given input 
                - lx_z_1, size of hidden units, normalizer_params
                - add batch_norm as the normalizer_fn
            > e.g.
                see the above line
        '''

        lx_z = layers.fully_connected(lx_z,
                                      500,
                                      normalizer_fn=layers.batch_norm,
                                      normalizer_params=normalizer_params)

        x_mean = layers.fully_connected(lx_z, n_x,
                                        activation_fn=None)  # compute the mean
        x_logstd = layers.fully_connected(
            lx_z, n_x, activation_fn=None)  # compute the log std

        x = zs.Normal('x',
                      x_mean,
                      logstd=x_logstd,
                      n_samples=n_samples,
                      group_event_ndims=1)

    return model, x_mean
Example #7
def lntm(observed, D, K, V, eta_mean, eta_logstd):
    with zs.BayesianNet(observed=observed) as model:
        eta = zs.Normal('eta',
                        tf.tile(tf.expand_dims(eta_mean, 0), [D, 1]),
                        logstd=tf.tile(tf.expand_dims(eta_logstd, 0), [D, 1]),
                        group_event_ndims=1)
        beta = zs.Normal('beta', tf.zeros([K, V]),
                         logstd=tf.ones([K, V]) * log_delta,
                         group_event_ndims=1)
    return model
Example #8
def lntm(observed, n_chains, D, K, V, eta_mean, eta_logstd):
    with zs.BayesianNet(observed=observed) as model:
        D_multiple = tf.stack([D, 1])
        n_chains_multiple = tf.stack([n_chains, 1, 1])
        eta = zs.Normal('eta',
                        tf.tile(tf.expand_dims(
                            tf.tile(tf.expand_dims(eta_mean, 0), D_multiple),
                            0), n_chains_multiple),
                        logstd=eta_logstd,
                        group_ndims=1)
        beta = zs.Normal('beta', tf.zeros([K, V]), logstd=log_delta,
                         group_ndims=1)
    return model
Example #9
def bayesianNN(observed, x, n_x, layer_sizes, n_particles, batchsize,
               segment_length):
    with zs.BayesianNet(observed=observed) as model:
        ws = []
        for i, (n_in,
                n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
            w_mu = tf.zeros([1, n_out, n_in + 1])
            ws.append(
                zs.Normal('w' + str(i),
                          w_mu,
                          std=1.,
                          n_samples=n_particles,
                          group_ndims=2))

        # forward
        # replicate input to sample many networks
        ly_x = tf.expand_dims(
            tf.tile(tf.expand_dims(x, 0), [n_particles, 1, 1]), 3)
        for i in range(len(ws)):
            # tile weights per batch and frame
            w = tf.tile(ws[i], [1, tf.shape(x)[0], 1, 1])
            # add bias
            ly_x = tf.concat(
                [ly_x, tf.ones([n_particles, tf.shape(x)[0], 1, 1])], 2)
            # forward pass
            ly_x = tf.matmul(w, ly_x) / tf.sqrt(tf.to_float(tf.shape(ly_x)[2]))
            # add relu activation if not last layer
            if i < len(ws) - 1:
                ly_x = tf.nn.relu(ly_x)

        # reward_mean = f_NN(x, W)
        reward_mean = tf.squeeze(ly_x, [2, 3])

        # reshape rewards to sum up segments
        segment_rewards = tf.reshape(reward_mean,
                                     (-1, batchsize, segment_length))
        segment_rewards = tf.reduce_sum(segment_rewards, axis=2)

        # segment_rewards ~ N(mean, exp(segment_logstd)):
        # noise is added to the output to get a tractable likelihood
        segment_logstd = tf.get_variable(
            'segment_logstd',
            shape=[],
            initializer=tf.constant_initializer(0.))
        _ = zs.Normal('segment_rewards',
                      segment_rewards,
                      logstd=segment_logstd)

    return model, reward_mean, None, segment_rewards
Example #10
def var_dropout(observed, x, n, net_size, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        h = x
        normalizer_params = {
            'is_training': is_training,
            'updates_collections': None
        }
        for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
            eps_mean = tf.zeros([n, n_in])
            # Multiplicative Gaussian noise on the inputs (equivalent to noise on the weights)
            eps = zs.Normal('layer' + str(i) + '/eps',
                            eps_mean,
                            std=1.,
                            n_samples=n_particles,
                            group_ndims=1)

            h = layers.fully_connected(h * eps,
                                       n_out,
                                       normalizer_fn=layers.batch_norm,
                                       normalizer_params=normalizer_params)
            if i < len(net_size) - 2:
                h = tf.nn.relu(h)

        y = zs.Categorical('y', h, group_ndims=0)

    return model, h
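As with the Bayesian neural net in Example #1, a hedged sketch of the log joint this variational-dropout model would typically be plugged into; x_obs, n, net_size, n_particles, is_training, and the dataset size N are assumed to exist elsewhere, and the legacy local_log_prob API is assumed.

def log_joint(observed):
    model, _ = var_dropout(observed, x_obs, n, net_size, n_particles, is_training)
    eps_names = ['layer' + str(i) + '/eps' for i in range(len(net_size) - 1)]
    log_pzs = model.local_log_prob(eps_names)   # priors over the noise variables
    log_py_xz = model.local_log_prob('y')       # classification likelihood
    # Rescale the minibatch likelihood to the full dataset of size N.
    return tf.add_n(log_pzs) + log_py_xz * N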
Example #11
def q_net(x, n_xl, n_z, n_particles, is_training):
    with zs.BayesianNet() as variational:
        normalizer_params = {
            'is_training': is_training,
            'updates_collections': None
        }
        lz_x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        lz_x = layers.conv2d(lz_x,
                             32,
                             kernel_size=5,
                             stride=2,
                             normalizer_fn=layers.batch_norm,
                             normalizer_params=normalizer_params)
        lz_x = layers.conv2d(lz_x,
                             64,
                             kernel_size=5,
                             stride=2,
                             normalizer_fn=layers.batch_norm,
                             normalizer_params=normalizer_params)
        lz_x = layers.conv2d(lz_x,
                             128,
                             kernel_size=5,
                             padding='VALID',
                             normalizer_fn=layers.batch_norm,
                             normalizer_params=normalizer_params)
        lz_x = layers.dropout(lz_x, keep_prob=0.9, is_training=is_training)
        lz_x = tf.reshape(lz_x, [-1, 128 * 3 * 3])
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z',
                      lz_mean,
                      lz_logstd,
                      n_samples=n_particles,
                      group_event_ndims=1)
    return variational
Example #12
    def __init__(self,
                 obs_shape,
                 act_shape,
                 observed,
                 n_particles,
                 segment_length,
                 batchsize,
                 h_size=64):
        self.n_particles = n_particles
        self.batchsize = batchsize
        self.segment_length = segment_length

        with zs.BayesianNet(observed=observed) as model:
            input_dim = np.prod(obs_shape) + np.prod(act_shape)
            layer_sizes = [input_dim] + [64] + [1]
            self.w_names = ['w' + str(i) for i in range(len(layer_sizes) - 1)]

            self.ws = []
            for i, (n_in,
                    n_out) in enumerate(zip(layer_sizes[:-1],
                                            layer_sizes[1:])):
                w_mu = tf.zeros([1, n_out, n_in + 1])
                self.ws.append(
                    zs.Normal('w' + str(i),
                              w_mu,
                              std=1.,
                              n_samples=n_particles,
                              group_ndims=2))

            self.model = model
Example #13
    def vae(observed, n, dim_x, dim_z, n_particles):
        '''decoder: z --> x'''
        with zs.BayesianNet(observed=observed) as model:
            pai = tf.get_variable('pai', shape=[dim_z],
                                  dtype=tf.float32,
                                  trainable=True,
                                  initializer=tf.constant_initializer(1.0))
            n_pai = tf.tile(tf.expand_dims(pai, 0), [n, 1])
            # zhusuan.model.stochastic.OnehotCategorical
            z = zs.OnehotCategorical('z', logits=n_pai,
                                     dtype=tf.float32,
                                     n_samples=n_particles)
            print('-' * 10, 'z:', z.tensor.get_shape().as_list())  # [n_particles, None, dim_z]
            mu = tf.get_variable('mu', shape=[dim_z, dim_x],
                                 dtype=tf.float32,
                                 initializer=tf.random_uniform_initializer(0, 1))
            log_sigma = tf.get_variable('log_sigma', shape=[dim_z, dim_x],
                                        dtype=tf.float32,
                                        initializer=tf.random_uniform_initializer(-3, -2))
            # x_mean, x_logstd: [n_particles, None, dim_x]
            x_mean = tf.reshape(tf.matmul(tf.reshape(z, [-1, dim_z]), mu),
                                [n_particles, n, dim_x])
            x_logstd = tf.reshape(tf.matmul(tf.reshape(z, [-1, dim_z]), log_sigma),
                                  [n_particles, n, dim_x])
            x = zs.Normal('x', mean=x_mean, logstd=x_logstd, group_event_ndims=1)
        return model, x.tensor, z.tensor
Example #14
 def forward(self, mean):
     mean = tf.squeeze(mean, [-1])
     prec = self.ps(tf.shape(mean)[0])
     prec = tf.stop_gradient(prec)
     y_logstd = -0.5 * tf.log(prec)
     y = zs.Normal('y', mean, logstd=y_logstd * tf.ones_like(mean))
     return y
Example #15
def vae_conv(observed, n, n_x, n_z, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        z_mean = tf.zeros([n, n_z])
        z = zs.Normal('z', z_mean, std=1., n_samples=n_particles,
                      group_event_ndims=1)
        lx_z = tf.reshape(z, [-1, 1, 1, n_z])
        lx_z = layers.conv2d_transpose(
            lx_z, 128, kernel_size=3, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 64, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 1, kernel_size=5, stride=2,
            activation_fn=None)
        x_logits = tf.reshape(lx_z, [n_particles, n, -1])
        x = zs.Bernoulli('x', x_logits, group_event_ndims=1)
    return model
Example #16
def q_net(observed, x, n_z, n_samples, is_training):
    with zs.BayesianNet(observed=observed) as variational:
        normalizer_params = {"is_training": is_training, "updates_collections": None}
        x = tf.to_float(x)  # cast to float for the computations below

        """
        TODO3: add two more mlp layers with 500 hidden units
            > given input 
                - x, size of hidden units, normalizer_params
                - add batch_norm as the normalizer_fn
            > e.g.
                see the generative model
        """
        lz_x_1 = layers.fully_connected(
            x, 500, normalizer_fn=layers.batch_norm, normalizer_params=normalizer_params
        )  # an MLP layer of 500 hidden units with x as the input

        lz_x_2 = layers.fully_connected(
            lz_x_1, 500, normalizer_fn=layers.batch_norm, normalizer_params=normalizer_params
        )  # an MLP layer of 500 hidden units with lz_x_1 as the input

        z_mean = layers.fully_connected(lz_x_2, n_z, activation_fn=None)  # compute the mean
        z_logstd = layers.fully_connected(lz_x_2, n_z, activation_fn=None)  # compute the log std

        """
        TODO4: sampling z using the Gaussian distribution of zhusuan
            > given input
                - z_mean, z_logstd (note that it is not std), n_samples
                - set group_event_ndims as 1
            > e.g.
                - x = zs.Bernoulli('x', mu, group_event_ndims=1)
        """
        z = zs.Normal("z", z_mean, logstd=z_logstd, group_ndims=1)

    return variational
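A hedged sketch of how this inference network is usually paired with the generative model from Example #6 through ZhuSuan's legacy variational API; x_obs, n, n_x, n_z, n_samples, and is_training are assumed placeholders or constants defined elsewhere.

variational = q_net({}, x_obs, n_z, n_samples, is_training)
qz_samples, log_qz = variational.query('z', outputs=True, local_log_prob=True)

def log_joint(observed):
    model, _ = vae(observed, n, n_x, n_z, n_samples, is_training)
    log_pz, log_px_z = model.local_log_prob(['z', 'x'])
    return log_pz + log_px_z

# SGVB estimator of the evidence lower bound.
lower_bound = zs.variational.elbo(
    log_joint, observed={'x': x_obs}, latent={'z': [qz_samples, log_qz]}, axis=0)
cost = tf.reduce_mean(lower_bound.sgvb())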
Example #17
def q(observed, n, net_size, n_particles):
    # Build the variational posterior distribution.
    # We assume it is factorized
    with zs.BayesianNet(observed=observed) as variational:
        ws = []
        for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
            with tf.variable_scope('layer' + str(i)):
                logit_alpha = tf.get_variable(
                    'logit_alpha', [n_in],
                    initializer=tf.constant_initializer(0.1))

            alpha = tf.nn.sigmoid(logit_alpha)
            alpha = tf.tile(tf.expand_dims(alpha, 0), [n, 1])
            # eps = zs.Normal('layer' + str(i) + '/eps',
            #     1., logstd=0.5 * tf.log(alpha + 1e-6),
            #   n_samples=n_particles, group_ndims=1)
            w_mean = tf.get_variable('w_mean_' + str(i),
                                     shape=[n_in],
                                     initializer=tf.constant_initializer(0.))
            w_logstd = tf.get_variable('w_logstd_' + str(i),
                                       shape=[n_in],
                                       initializer=tf.constant_initializer(0.))
            w_mean = tf.tile(tf.expand_dims(w_mean, 0), [n, 1])
            w_logstd = tf.tile(tf.expand_dims(w_logstd, 0), [n, 1])
            ws.append(
                zs.Normal('layer' + str(i) + '/eps',
                          w_mean,
                          logstd=w_logstd,
                          n_samples=n_particles,
                          group_ndims=1))
    return variational
Example #18
 def q_net(x, n_z):
     with zs.BayesianNet() as variational:
         lz_x = layers.fully_connected(tf.to_float(x), 500)
         lz_x = layers.fully_connected(lz_x, 500)
         z_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
         z_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
         z = zs.Normal('z', z_mean, logstd=z_logstd, group_event_ndims=1)
     return variational
Example #19
def q_net(x, n_h):
    with zs.BayesianNet() as qh:
        lz_x = layers.fully_connected(tf.to_float(x), 512)
        lz_x = layers.fully_connected(lz_x, 512)
        h_mean = layers.fully_connected(lz_x, n_h, activation_fn=None)
        h_logstd = layers.fully_connected(lz_x, n_h, activation_fn=None)
        h = zs.Normal('h', h_mean, logstd=h_logstd, group_ndims=1)
    return qh
Example #20
 def q_z_xy(self, captions, labels, lengths, images=None):
     """Calculate approximate posterior q(z|x, y, f(I))
     Returns:
         model: zhusuan model object, can be used for getting probabilities
     """
     if images is not None:
         self.images_fv = images
     with zs.BayesianNet() as model:
         # encoder and decoder have different embeddings but the same image features
         with tf.device("/cpu:0"):
             embedding = tf.get_variable(
                         "enc_embeddings", [self.vocab_size,
                                            self.embed_size],
                         dtype=tf.float32)
             vect_inputs = tf.nn.embedding_lookup(embedding, captions)
         with tf.name_scope(name="net") as scope1:
             cell_0 = make_rnn_cell(
                 [self.lstm_hidden],
                 base_cell=tf.contrib.rnn.LSTMCell)
             zero_state0 = cell_0.zero_state(
                 batch_size=tf.shape(self.images_fv)[0],
                 dtype=tf.float32)
             # run this cell to get initial state
             added_shape = self.embed_size + self.params.n_classes
             im_f = tf.layers.dense(self.images_fv, added_shape)
             _, initial_state0 = cell_0(im_f, zero_state0)
             # c = h = tf.layers.dense(self.images_fv,
             #                         self.params.decoder_hidden,
             #                         name='dec_init_map')
             # initial_state0 = (tf.nn.rnn_cell.LSTMStateTuple(c, h), )
             # x, y
             y = tf.tile(tf.expand_dims(labels, 1),
                         [1, tf.shape(vect_inputs)[1], 1])
             vect_inputs = tf.concat([vect_inputs, tf.to_float(y)], 2)
             outputs, final_state = tf.nn.dynamic_rnn(cell_0,
                                                      inputs=vect_inputs,
                                                      sequence_length=lengths,
                                                      initial_state=initial_state0,
                                                      swap_memory=True,
                                                      dtype=tf.float32,
                                                      scope=scope1)
         # [batch_size, 2 * lstm_hidden_size]
         # final_state = ((c, h), )
         final_state = final_state[0][1]
         lz_mean = layers.dense(inputs=final_state,
                                units=self.latent_size,
                                activation=None)
         lz_logstd = layers.dense(inputs=final_state,
                                  units=self.latent_size,
                                  activation=None)
         lz_std = tf.exp(lz_logstd)
         # define the latent variable's StochasticTensor
         # add mu_k, sigma_k (CVAE / AG-CVAE)
         tm_list = []  # means
         tl_list = []  # log standard deviations
         z = zs.Normal('z', mean=lz_mean, std=lz_std, group_ndims=1,
                       n_samples=self.z_samples)
     return model, tm_list, tl_list
Example #21
def q_net_font(observed, x, is_training):
    with zs.BayesianNet(observed=observed) as encoder:
        normalizer_params = {
            'is_training': is_training,
            'updates_collections': None
        }
        x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        ladder0 = layers.conv2d(
            x,
            ngf,
            4,
            stride=2,
            activation_fn=lrelu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params,
            weights_initializer=init_ops.RandomNormal(stddev=0.02))
        ladder0 = layers.conv2d(
            ladder0,
            ngf * 2,
            4,
            stride=2,
            activation_fn=lrelu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params,
            weights_initializer=init_ops.RandomNormal(stddev=0.02))
        ladder0 = layers.conv2d(
            ladder0,
            ngf * 4,
            4,
            stride=2,
            activation_fn=lrelu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params,
            weights_initializer=init_ops.RandomNormal(stddev=0.02))
        ladder0 = layers.conv2d(
            ladder0,
            ngf * 8,
            4,
            stride=2,
            activation_fn=lrelu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params,
            weights_initializer=init_ops.RandomNormal(stddev=0.02))
        ladder0 = layers.flatten(ladder0)
        font_mean = layers.fully_connected(ladder0,
                                           font_dim,
                                           activation_fn=tf.identity)
        font_std = layers.fully_connected(ladder0,
                                          font_dim,
                                          activation_fn=tf.sigmoid)

        z_font = zs.Normal('z_font',
                           mean=font_mean,
                           std=font_std,
                           n_samples=1,
                           group_event_ndims=1)
    return encoder, z_font
Example #22
 def vae(observed, n, n_x, n_z):
     with zs.BayesianNet(observed=observed) as model:
         z_mean = tf.zeros([n, n_z])
         z = zs.Normal('z', z_mean, std=1., group_event_ndims=1)
         lx_z = layers.fully_connected(z, 500)
         lx_z = layers.fully_connected(lx_z, 500)
         x_logits = layers.fully_connected(lx_z, n_x, activation_fn=None)
         x = zs.Bernoulli('x', x_logits, group_event_ndims=1)
     return model, x_logits
Example #23
def q_net(observed, x, n_z, n_particles):
    with zs.BayesianNet(observed=observed) as variational:
        lz_x = layers.fully_connected(tf.to_float(x), 500)
        lz_x = layers.fully_connected(lz_x, 500)
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
Example #24
def mean_field_variational(n_particles):
    with zs.BayesianNet() as variational:
        z_mean, z_logstd = [], []
        for i in range(2):
            z_mean.append(tf.Variable(-2.))
            z_logstd.append(tf.Variable(-5.))
            _ = zs.Normal('z' + str(i + 1), z_mean[i], z_logstd[i],
                          n_samples=n_particles)
    return variational, z_mean, z_logstd
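A hypothetical sketch of wiring this factorized posterior into an ELBO; the log_joint function for the target two-variable model and the optimizer settings are assumptions, and the legacy query/elbo interface is assumed.

variational, z_mean, z_logstd = mean_field_variational(n_particles)
qz1, qz2 = variational.query(['z1', 'z2'], outputs=True, local_log_prob=True)
lower_bound = zs.variational.elbo(
    log_joint, observed={}, latent={'z1': qz1, 'z2': qz2}, axis=0)
cost = tf.reduce_mean(lower_bound.sgvb())       # surrogate cost to minimize
infer_op = tf.train.AdamOptimizer(0.1).minimize(cost)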
Example #25
 def zs_model(self, observed=None, n_x=None, n_z=None,
              is_reparameterized=None, z_group_ndims=1, x_group_ndims=1):
     if is_reparameterized is None:
         is_reparameterized = True
     with zs.BayesianNet(observed) as net:
         z = zs.Normal('z',
                       mean=tf.zeros([BATCH_SIZE, Z_DIMS]),
                       std=tf.ones([BATCH_SIZE, Z_DIMS]),
                       is_reparameterized=is_reparameterized,
                       n_samples=n_z,
                       group_ndims=z_group_ndims)
         x_params = self.h_for_p_x(z)
         x = zs.Normal('x',
                       mean=x_params['mean'],
                       logstd=x_params['logstd'],
                       n_samples=n_x,
                       group_ndims=x_group_ndims)
     return net
Example #26
def qz_xy(x, y, n_z, n_particles):
    with zs.BayesianNet() as variational:
        lz_xy = layers.fully_connected(tf.to_float(tf.concat([x, y], 1)), 500)
        lz_xy = layers.fully_connected(lz_xy, 500)
        lz_mean = layers.fully_connected(lz_xy, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_xy, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
Example #27
def lntm(observed, n_chains, n_docs, n_topics, n_vocab, eta_mean, eta_logstd):
    with zs.BayesianNet(observed=observed) as model:
        eta_mean = tf.tile(tf.expand_dims(eta_mean, 0), [n_docs, 1])
        # eta/theta: Unnormalized/normalized document-topic matrix
        eta = zs.Normal('eta', eta_mean, logstd=eta_logstd, n_samples=n_chains,
                        group_ndims=1)
        theta = tf.nn.softmax(eta)
        # beta/phi: Unnormalized/normalized topic-word matrix
        beta = zs.Normal('beta', tf.zeros([n_topics, n_vocab]),
                         logstd=log_delta, group_ndims=1)
        phi = tf.nn.softmax(beta)
        # doc_word: Document-word matrix
        doc_word = tf.matmul(tf.reshape(theta, [-1, n_topics]), phi)
        doc_word = tf.reshape(doc_word, [n_chains, n_docs, n_vocab])
        x = zs.UnnormalizedMultinomial('x', tf.log(doc_word),
                                       normalize_logits=False,
                                       dtype=tf.float32)
    return model
Example #28
def gaussian(observed, n_x, stdev, n_particles):
    with zs.BayesianNet(observed=observed) as model:
        x_mean = tf.zeros([n_x])
        x = zs.Normal('x',
                      x_mean,
                      std=stdev,
                      n_samples=n_particles,
                      group_ndims=1)
    return model
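A hedged sketch of drawing samples from this simple Gaussian model with ZhuSuan's HMC transition kernel; the step size, number of leapfrog steps, and the surrounding session loop are assumptions, with n_x, stdev, and n_particles taken to be Python constants defined elsewhere.

x_var = tf.Variable(tf.zeros([n_particles, n_x]), trainable=False, name='x_var')

def log_joint(observed):
    model = gaussian(observed, n_x, stdev, n_particles)
    return model.local_log_prob('x')

hmc = zs.HMC(step_size=0.1, n_leapfrogs=10)
sample_op, hmc_info = hmc.sample(log_joint, observed={}, latent={'x': x_var})
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        _, x_samples = sess.run([sample_op, hmc_info.samples['x']])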
Example #29
def unlabeled_proposal(x, n_y, n_z, n_particles):
    with zs.BayesianNet() as proposal:
        y_logits = qy_x(x, n_y)
        y = zs.OnehotCategorical('y', y_logits, n_samples=n_particles)
        x_tiled = tf.tile(tf.expand_dims(x, 0), [n_particles, 1, 1])
        z_mean, z_logstd = qz_xy(x_tiled, y, n_z)
        z = zs.Normal('z', z_mean, logstd=z_logstd, group_ndims=1,
                      is_reparameterized=False)
    return proposal
Example #30
def qh_net(x, z, n_h):
    with zs.BayesianNet() as variational:
        lh_x = layers.fully_connected(tf.to_float(tf.concat([x, z], axis=1)),
                                      500)
        lh_x = layers.fully_connected(lh_x, 500)
        h_mean = layers.fully_connected(lh_x, n_h, activation_fn=None)
        h_logstd = layers.fully_connected(lh_x, n_h, activation_fn=None)
        h = zs.Normal('h', h_mean, logstd=h_logstd, group_ndims=1)
    return variational