Example #1
 def _encoder(self, x, is_training=None):
     net = self.arch['encoder']
     for i, (o, k, s) in enumerate(zip(net['output'], net['kernel'], net['stride'])):
         x = conv2d_nchw_layernorm(
             x, o, k, s, lrelu,
             name='Conv2d-{}'.format(i)
         )
     x = slim.flatten(x)
     z_mu = tf.layers.dense(x, self.arch['z_dim'])
     z_lv = tf.layers.dense(x, self.arch['z_dim'])
     return z_mu, z_lv
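The encoder returns the mean (z_mu) and log-variance (z_lv) of a diagonal Gaussian over the latent code. Below is a minimal sketch, not part of the example, of the reparameterization step that typically consumes these outputs (TF 1.x; the helper name sample_z is illustrative):

    import tensorflow as tf

    def sample_z(z_mu, z_lv):
        # Reparameterization trick: z = mu + sigma * eps, with sigma = exp(0.5 * log-variance)
        eps = tf.random_normal(tf.shape(z_mu))
        return z_mu + tf.exp(0.5 * z_lv) * eps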
Example #2
 def _discriminator(self, x, is_training=None):
     net = self.arch['discriminator']
     for i, (o, k, s) in enumerate(
             zip(net['output'], net['kernel'], net['stride'])):
         x = conv2d_nchw_layernorm(x,
                                   o,
                                   k,
                                   s,
                                   lrelu,
                                   name='Conv2d-{}'.format(i))
     x = slim.flatten(x)
     d = tf.layers.dense(x, 1)
     return d
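The discriminator returns one raw logit per example. A minimal sketch of how such a logit is commonly turned into training objectives with sigmoid cross-entropy GAN losses (an assumption about the surrounding training code, not taken from the source; gan_losses is an illustrative helper name):

    import tensorflow as tf

    def gan_losses(d_real, d_fake):
        # d_real / d_fake: raw logits from _discriminator for real and generated inputs
        bce = tf.nn.sigmoid_cross_entropy_with_logits
        loss_d = tf.reduce_mean(bce(logits=d_real, labels=tf.ones_like(d_real)) +
                                bce(logits=d_fake, labels=tf.zeros_like(d_fake)))
        loss_g = tf.reduce_mean(bce(logits=d_fake, labels=tf.ones_like(d_fake)))
        return loss_d, loss_g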
Example #3
    def _encoder(self, x, net):
        x = tf.transpose(x, perm=[0, 3, 2, 1]) # [N, d, n_frames, 1]
        for i, (o, k, s) in enumerate(zip(net['output'], net['kernel'], net['stride'])):
            x = conv2d_nchw_layernorm(
                x, o, k, s, lrelu,
                name='Conv-{}'.format(i)
            )

        # carefully design the architecture so that now x has shape [N, C, n_frames, 1]
        batch_size, c, n_frames, w = x.get_shape().as_list()
        x = tf.transpose(x, perm=[0, 2, 1, 3]) # [N, n_frames, C, 1]
        x = tf.squeeze(x, axis=[-1]) # [N, n_frames, C]
        z_mu = tf.layers.dense(x, self.arch['z_dim'], name='Dense-mu') # [N, n_frames, z_dim]
        z_lv = tf.layers.dense(x, self.arch['z_dim'], name='Dense-lv') # [N, n_frames, z_dim]
        return z_mu, z_lv
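Here the dense layers act on a [N, n_frames, C] tensor, so z_mu and z_lv describe one Gaussian per frame. A minimal sketch of the closed-form KL term a VAE objective would typically add for these outputs (assumed usage, not shown in the source; kl_to_standard_normal is an illustrative name):

    import tensorflow as tf

    def kl_to_standard_normal(z_mu, z_lv):
        # KL( N(mu, exp(lv)) || N(0, I) ), summed over z_dim,
        # averaged over batch and frames
        kl = 0.5 * (tf.exp(z_lv) + tf.square(z_mu) - 1.0 - z_lv)
        return tf.reduce_mean(tf.reduce_sum(kl, axis=-1))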
Example #4
    def _discriminator(self, x, net):
        x = tf.transpose(x, perm=[0, 3, 2, 1]) # [N, d, n_frames, 1]
        
        for i, (o, k, s) in enumerate(zip(net['output'], net['kernel'], net['stride'])):
            x = conv2d_nchw_layernorm(
                x, o, k, s, lrelu,
                name='Conv-{}'.format(i)
            )

        # carefully design the architecture so that now x has shape [N, C, n_frames, 1]
        batch_size, c, n_frames, w = x.get_shape().as_list()
        x = tf.transpose(x, perm=[0, 2, 1, 3]) # [N, n_frames, C, 1]
        x = tf.squeeze(x, axis=[-1]) # [N, n_frames, C]
        x = tf.layers.dense(x, 1) # [N, n_frames, 1]
        return tf.reduce_mean(x, axis=1) #[N, 1]
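Averaging the per-frame logits over the time axis yields a single utterance-level score whose shape does not depend on n_frames. Since no sigmoid is applied, one possible pairing is a Wasserstein-style critic objective; the sketch below is purely an illustrative assumption (critic_losses is a made-up helper name):

    import tensorflow as tf

    def critic_losses(d_real, d_fake):
        # Wasserstein-style objective on the utterance-level scores:
        # the critic separates real from generated, the generator closes the gap
        loss_d = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real)
        loss_g = -tf.reduce_mean(d_fake)
        return loss_d, loss_g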
Example #5
 def _encoder(self, x, is_training=None):
     net = self.arch['encoder']
     print("input shape:", x.get_shape().as_list())
     for i, (o, k, s) in enumerate(
             zip(net['output'], net['kernel'], net['stride'])):
         x = conv2d_nchw_layernorm(x,
                                   o,
                                   k,
                                   s,
                                   lrelu,
                                   name='Conv2d-{}'.format(i))
         print("Conv2d-LN{} output shape:".format(i), x.get_shape().as_list())
     x = slim.flatten(x)
     print("flattened shape:", x.get_shape().as_list())
     z_mu = tf.layers.dense(x, self.arch['z_dim'])
     z_lv = tf.layers.dense(x, self.arch['z_dim'])
     print("z_mu shape:", z_mu.get_shape().as_list())
     return z_mu, z_lv