Code example #1
def dec_down(
        gs, zs_posterior, training, init=False, dropout_p=0.5,
        n_scales=1, n_residual_blocks=2, activation="elu",
        n_latent_scales=2):
    """Top-down decoder pass of a hierarchical VAE.

    Consumes the bottom-up features ``gs`` back-to-front (via ``pop``)
    and, on the first ``n_latent_scales`` scales, predicts prior
    parameters and draws prior samples. During training the feedback
    into the decoder comes from ``zs_posterior``; at inference the prior
    samples are fed back instead.

    Parameters
    ----------
    gs : sequence of tensors
        Bottom-up skip features, consumed from the end.
    zs_posterior : sequence of tensors
        Posterior samples, consumed front-to-back; used only if ``training``.
    training : bool
        If True, feed posterior samples back instead of prior samples.
    init, dropout_p, activation
        Forwarded to ``model_arg_scope``.
    n_scales, n_residual_blocks, n_latent_scales : int
        Architecture hyperparameters; ``n_residual_blocks`` must be even.

    Returns
    -------
    (hs, ps, zs)
        Lists of hidden units, prior parameters, and prior samples.
    """
    assert n_residual_blocks % 2 == 0
    # work on copies: both lists are consumed destructively below
    gs = list(gs)
    zs_posterior = list(zs_posterior)
    with model_arg_scope(
            init=init, dropout_p=dropout_p, activation=activation):
        # outputs
        hs = []  # hidden units
        ps = []  # priors
        zs = []  # prior samples
        # prepare input
        n_filters = gs[-1].shape.as_list()[-1]
        h = nn.nin(gs[-1], n_filters)
        for l in range(n_scales):
            # level module
            ## hidden units
            for i in range(n_residual_blocks // 2):
                h = nn.residual_block(h, gs.pop())
                hs.append(h)
            if l < n_latent_scales:
                ## prior parameters and sample (no spatial correlations)
                p = latent_parameters(h)
                ps.append(p)
                z_prior = latent_sample(p)
                zs.append(z_prior)

                # feedback: posterior sample during training, else prior
                if training:
                    z = zs_posterior.pop(0)
                else:
                    z = z_prior
                for i in range(n_residual_blocks // 2):
                    n_h_channels = h.shape.as_list()[-1]
                    # concatenate the sample, then project back to the
                    # original channel count before the residual block
                    h = tf.concat([h, z], axis=-1)
                    h = nn.nin(h, n_h_channels)
                    h = nn.residual_block(h, gs.pop())
                    hs.append(h)
            else:
                ## no latents at this scale: plain residual blocks
                for i in range(n_residual_blocks // 2):
                    h = nn.residual_block(h, gs.pop())
                    hs.append(h)
            # prepare input to next level
            if l + 1 < n_scales:
                n_filters = gs[-1].shape.as_list()[-1]
                h = nn.upsample(h, n_filters)

        # every bottom-up feature (and, in training, every posterior
        # sample) must have been consumed exactly once
        assert not gs
        if training:
            assert not zs_posterior

        return hs, ps, zs
Code example #2
def enc_down(
        gs, init=False, dropout_p=0.5,
        n_scales=1, n_residual_blocks=2, activation="elu",
        n_latent_scales=2):
    """Top-down encoder pass producing posteriors of a hierarchical VAE.

    Consumes the bottom-up features ``gs`` back-to-front (via ``pop``)
    and, for the first ``n_latent_scales`` scales, predicts posterior
    parameters, draws posterior samples, and feeds each sample back into
    the next residual blocks. Stops early once all latent scales have
    been processed, so ``gs`` may be left partially unconsumed.

    Parameters
    ----------
    gs : sequence of tensors
        Bottom-up skip features, consumed from the end.
    init, dropout_p, activation
        Forwarded to ``model_arg_scope``.
    n_scales, n_residual_blocks, n_latent_scales : int
        Architecture hyperparameters; ``n_residual_blocks`` must be even.

    Returns
    -------
    (hs, qs, zs)
        Lists of hidden units, posterior parameters, posterior samples.
    """
    assert n_residual_blocks % 2 == 0
    gs = list(gs)  # consumed destructively; copy to protect the caller
    with model_arg_scope(
            init=init, dropout_p=dropout_p, activation=activation):
        # outputs
        hs = []  # hidden units
        qs = []  # posteriors
        zs = []  # samples from posterior
        # prepare input
        n_filters = gs[-1].shape.as_list()[-1]
        h = nn.nin(gs[-1], n_filters)
        for l in range(n_scales):
            # level module
            ## hidden units
            for i in range(n_residual_blocks // 2):
                h = nn.residual_block(h, gs.pop())
                hs.append(h)
            if l < n_latent_scales:
                ## posterior parameters
                q = latent_parameters(h)
                qs.append(q)
                ## posterior sample
                z = latent_sample(q)
                zs.append(z)
                ## sample feedback
                for i in range(n_residual_blocks // 2):
                    gz = tf.concat([gs.pop(), z], axis=-1)
                    h = nn.residual_block(h, gz)
                    hs.append(h)
            else:
                # no latents below this scale -- no need to go down further
                break
            # prepare input to next level
            if l + 1 < n_scales:
                n_filters = gs[-1].shape.as_list()[-1]
                h = nn.upsample(h, n_filters)

        # NOTE: gs may be non-empty here because of the early break above
        return hs, qs, zs
Code example #3 — File: model.py, Project: daib13/ScaleSdVae
 def __build_decoder(self):
     """Build the decoder sub-graph.

     Samples ``self.z`` via the reparameterization trick, maps it through
     a stack of fully-connected layers, reshapes to the smallest spatial
     feature map, then applies upsample + residual-block stages down to a
     3-channel output. Defines ``self.x_hat`` (sigmoid of the final
     feature map) and a trainable scalar ``self.log_gamma`` with
     ``self.gamma = exp(self.log_gamma)``.
     """
     with tf.name_scope('sample'):
         # reparameterization: z = mu_z + sd_z * eps, eps ~ N(0, 1)
         self.noise = tf.random_normal([self.batch_size, self.latent_dim],
                                       0.0, 1.0, tf.float32)
         self.z = self.noise * self.sd_z + self.mu_z
     feature_map = self.z
     with tf.name_scope('decoder'):
         with tf.variable_scope('decoder_w'):
             # fully-connected stack; walks self.fc_dim in reverse order
             for i_fc in range(self.num_fc_layer):
                 with tf.name_scope('fc' + str(i_fc)):
                     feature_map = dense('fc' + str(i_fc) + '_w',
                                         feature_map,
                                         self.fc_dim[-1 - i_fc], self.reg,
                                         self.activation_fn)
             # final FC projects to the flattened smallest feature map
             feature_map_dim = self.num_filter[-1] * \
                 self.smallest_size * self.smallest_size
             with tf.name_scope('fc' + str(self.num_fc_layer)):
                 feature_map = dense('fc' + str(self.num_fc_layer) + '_w',
                                     feature_map, feature_map_dim, self.reg,
                                     self.activation_fn)
                 feature_map = tf.reshape(feature_map, [
                     -1, self.smallest_size, self.smallest_size,
                     self.num_filter[-1]
                 ])
             # upsample + residual blocks; the last block emits 3 channels
             # (presumably an RGB image -- TODO confirm against the data)
             for i_block in range(self.num_block):
                 with tf.name_scope('upsample' + str(i_block)):
                     feature_map = upsample(feature_map)
                 with tf.name_scope('block' + str(i_block)):
                     if i_block == self.num_block - 1:
                         num_filter = 3
                     else:
                         num_filter = self.num_filter[-2 - i_block]
                     feature_map = res_block('block' + str(i_block) + '_w',
                                             feature_map,
                                             self.num_layer_per_block,
                                             num_filter, self.filter_size,
                                             self.padding, self.reg,
                                             self.activation_fn)
             with tf.name_scope('x_hat'):
                 # sigmoid squashes the reconstruction into (0, 1)
                 self.x_hat = tf.nn.sigmoid(feature_map)
             with tf.variable_scope('log_gamma'):
                 # trainable scalar, initialized to 0 (gamma = 1)
                 self.log_gamma = tf.get_variable('log_gamma', [],
                                                  tf.float32,
                                                  tf.zeros_initializer(),
                                                  trainable=True)
             with tf.name_scope('gamma'):
                 self.gamma = tf.exp(self.log_gamma)
Code example #4 — File: models.py, Project: zhouqianyu0918/stylealign
def enc_down(gs,
             init=False,
             dropout_p=0.5,
             n_scales=1,
             n_residual_blocks=2,
             activation="elu",
             n_latent_scales=2):
    """Top-down encoder pass of the hierarchical model.

    Walks the bottom-up features ``gs`` from the back, emitting hidden
    states, posterior parameters, and posterior samples for the first
    ``n_latent_scales`` scales, then stops early.
    """
    assert n_residual_blocks % 2 == 0
    half_blocks = n_residual_blocks // 2
    stack = list(gs)  # consumed from the back; protects the caller's list
    with model_arg_scope(init=init, dropout_p=dropout_p,
                         activation=activation):
        hidden, posteriors, samples = [], [], []
        top = stack[-1]
        h = nn.nin(top, top.shape.as_list()[-1])
        for level in range(n_scales):
            for _ in range(half_blocks):
                h = nn.residual_block(h, stack.pop())
                hidden.append(h)
            if level >= n_latent_scales:
                break  # deeper scales carry no latents
            q = latent_parameters(h)  # posterior parameters
            posteriors.append(q)
            z = latent_sample(q)  # posterior sample
            samples.append(z)
            # feed the sample back through the remaining blocks
            for _ in range(half_blocks):
                merged = tf.concat([stack.pop(), z], axis=-1)
                h = nn.residual_block(h, merged)
                hidden.append(h)
            if level + 1 < n_scales:
                nxt = stack[-1]
                h = nn.upsample(h, nxt.shape.as_list()[-1])

        return hidden, posteriors, samples
Code example #5 — File: models.py, Project: zhouqianyu0918/stylealign
def dec_down(gs,
             zs_posterior,
             training,
             init=False,
             dropout_p=0.5,
             n_scales=1,
             n_residual_blocks=2,
             activation="elu",
             n_latent_scales=2):
    """Top-down decoder pass of a hierarchical VAE.

    Consumes bottom-up features ``gs`` back-to-front and, for the first
    ``n_latent_scales`` scales, produces prior parameters and samples.
    Spatially 1x1 latents are modeled directly; larger latents are split
    into four autoregressively modeled groups. When ``training`` is
    true, posterior samples from ``zs_posterior`` are fed back into the
    decoder instead of the prior samples.

    Returns ``(hs, ps, zs)``: hidden units, prior parameters, and prior
    samples collected over all scales.
    """
    assert n_residual_blocks % 2 == 0
    # copies: both lists are consumed destructively below
    gs = list(gs)
    zs_posterior = list(zs_posterior)
    with model_arg_scope(init=init, dropout_p=dropout_p,
                         activation=activation):
        # outputs
        hs = []  # hidden units
        ps = []  # priors
        zs = []  # prior samples
        # prepare input
        n_filters = gs[-1].shape.as_list()[-1]
        h = nn.nin(gs[-1], n_filters)
        for l in range(n_scales):
            # level module
            ## hidden units
            for i in range(n_residual_blocks // 2):
                h = nn.residual_block(h, gs.pop())
                hs.append(h)
            if l < n_latent_scales:
                ## prior
                spatial_shape = h.shape.as_list()[1]
                n_h_channels = h.shape.as_list()[-1]
                if spatial_shape == 1:
                    ### 1x1 feature map: no spatial correlations to model
                    p = latent_parameters(h)
                    ps.append(p)
                    z_prior = latent_sample(p)
                    zs.append(z_prior)
                else:
                    ### four autoregressively modeled groups
                    # NOTE(review): zs_posterior[0] is peeked (not popped)
                    # here; the same element is popped below as vae feedback
                    if training:
                        z_posterior_groups = nn.split_groups(zs_posterior[0])
                    p_groups = []
                    z_groups = []
                    # NOTE(review): nn.residual_block is called with a single
                    # argument here, unlike the two-argument calls elsewhere
                    # in this file -- confirm the helper allows that
                    p_features = tf.space_to_depth(nn.residual_block(h), 2)
                    for i in range(4):
                        p_group = latent_parameters(p_features,
                                                    num_filters=n_h_channels)
                        p_groups.append(p_group)
                        z_group = latent_sample(p_group)
                        z_groups.append(z_group)
                        # ar feedback: posterior group in training, else the
                        # freshly sampled prior group
                        if training:
                            feedback = z_posterior_groups.pop(0)
                        else:
                            feedback = z_group
                        # prepare input for next group
                        if i + 1 < 4:
                            p_features = nn.residual_block(
                                p_features, feedback)
                    if training:
                        assert not z_posterior_groups
                    # complete prior parameters
                    p = nn.merge_groups(p_groups)
                    ps.append(p)
                    # complete prior sample
                    z_prior = nn.merge_groups(z_groups)
                    zs.append(z_prior)
                ## vae feedback sampled from
                if training:
                    ## posterior
                    z = zs_posterior.pop(0)
                else:
                    ## prior
                    z = z_prior
                for i in range(n_residual_blocks // 2):
                    n_h_channels = h.shape.as_list()[-1]
                    # concat the sample, then project back to the original
                    # channel count before the residual block
                    h = tf.concat([h, z], axis=-1)
                    h = nn.nin(h, n_h_channels)
                    h = nn.residual_block(h, gs.pop())
                    hs.append(h)
            else:
                ## no latents at this scale: plain residual blocks
                for i in range(n_residual_blocks // 2):
                    h = nn.residual_block(h, gs.pop())
                    hs.append(h)
            # prepare input to next level
            if l + 1 < n_scales:
                n_filters = gs[-1].shape.as_list()[-1]
                h = nn.upsample(h, n_filters)

        # every bottom-up feature (and, in training, every posterior
        # sample) must have been consumed exactly once
        assert not gs
        if training:
            assert not zs_posterior

        return hs, ps, zs