Example #1
    def discriminator_net(self, lx, reuse=False):
        """
        :param lx: the input image batch (images from the lens data or generated samples)
        :param reuse: whether to reuse the 'discriminator' variable scope
        :return: unactivated logits of shape [batch_size, 1]
        """
        # layer height, width
        s_h, s_w, _ = self.input_size
        s_h2, s_w2 = utl.get_out_size(s_h, 2), utl.get_out_size(s_w, 2)
        s_h4, s_w4 = utl.get_out_size(s_h2, 2), utl.get_out_size(s_w2, 2)
        s_h8, s_w8 = utl.get_out_size(s_h4, 2), utl.get_out_size(s_w4, 2)
        s_h16, s_w16 = utl.get_out_size(s_h8, 2), utl.get_out_size(s_w8, 2)
        with tf.variable_scope('discriminator', reuse=reuse):
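            # four stride-2 conv blocks: each halves the spatial size and widens the
            # channels (size -> 2*size -> 4*size -> 8*size), with a LeakyReLU after each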
            h0 = ops.conv2d(lx, output_num=self.size, name='d_h0', reuse=reuse)
            h0 = ops.lrelu(h0, name='d_l0')

            h1 = ops.conv2d(h0, output_num=self.size * 2, name='d_h1', reuse=reuse)
            h1 = ops.batch_normalizer(h1, name='d_bn1', reuse=reuse)
            h1 = ops.lrelu(h1, name='d_l1')

            h2 = ops.conv2d(h1, output_num=self.size * 4, name='d_h2', reuse=reuse)
            h2 = ops.batch_normalizer(h2, name='d_bn2', reuse=reuse)
            h2 = ops.lrelu(h2, name='d_l2')

            h3 = ops.conv2d(h2, output_num=self.size * 8, name='d_h3', reuse=reuse)
            h3 = ops.batch_normalizer(h3, name='d_bn3', reuse=reuse)
            h3 = ops.lrelu(h3, name='d_l3')

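            # flatten the final s_h16 x s_w16 x (8 * size) feature maps and map them to a single logit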
            h4 = tf.reshape(h3, [self.batch_size, s_h16 * s_w16 * self.size * 8])

            h4 = ops.full_connect(h4, output_num=1, name='d_full', reuse=reuse)
            return h4
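Both methods rely on a helper utl.get_out_size to compute the spatial sizes after repeated stride-2 downsampling. The helper itself is not part of this listing; a minimal sketch, assuming it mirrors TensorFlow's SAME-padding size arithmetic (ceil of the input size divided by the stride), could look like this:

import math

def get_out_size(in_size, stride):
    # assumed behaviour: output size of a stride-`stride` conv with SAME padding,
    # i.e. ceil(in_size / stride) -- not taken from the original code
    return int(math.ceil(float(in_size) / float(stride)))

With a 64 x 64 input, for example, this gives 32, 16, 8 and 4, so the flattened discriminator feature vector has 4 * 4 * 8 * size elements.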
Example #2
    def generate_net(self, noise, train=True, reuse=False):
        """
        :param noise: source noise z (batch of latent vectors)
        :param train: whether batch normalization runs in training mode
        :param reuse: whether to reuse the 'generator' variable scope
        :return: generated images in [-1, 1] with shape [batch_size] + input_size
        """
        # layer height, width
        s_h, s_w, _ = self.input_size
        s_h2, s_w2 = utl.get_out_size(s_h, 2), utl.get_out_size(s_w, 2)
        s_h4, s_w4 = utl.get_out_size(s_h2, 2), utl.get_out_size(s_w2, 2)
        s_h8, s_w8 = utl.get_out_size(s_h4, 2), utl.get_out_size(s_w4, 2)
        s_h16, s_w16 = utl.get_out_size(s_h8, 2), utl.get_out_size(s_w8, 2)
        with tf.variable_scope('generator', reuse=reuse):
            # project the noise to a (s_h16 * s_w16 * 8 * size)-dim vector
            # (note: a tuple here raises AttributeError: 'tuple' object has no attribute 'as_list')
            z = ops.full_connect(noise, output_num=self.size * 8 * s_h16 * s_w16, name='g_full', reuse=reuse)
            # reshape the projection to [batch_size, h, w, c] feature maps
            h0 = tf.reshape(z, [-1, s_h16, s_w16, self.size * 8])
            h0 = ops.batch_normalizer(h0, train=train, name='g_bn0', reuse=reuse)
            h0 = ops.lrelu(h0, name='g_l0')

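            # three stride-2 transposed conv blocks: each doubles the spatial size and
            # halves the channels (8*size -> 4*size -> 2*size -> size)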
            h1 = ops.deconv2d(h0, output_size=[self.batch_size, s_h8, s_w8, self.size * 4], name='g_h1', reuse=reuse)
            h1 = ops.batch_normalizer(h1, train=train, name='g_bn1', reuse=reuse)
            h1 = ops.lrelu(h1, name='g_l1')

            h2 = ops.deconv2d(h1, output_size=[self.batch_size, s_h4, s_w4, self.size * 2], name='g_h2', reuse=reuse)
            h2 = ops.batch_normalizer(h2, train=train, name='g_bn2', reuse=reuse)
            h2 = ops.lrelu(h2, name='g_l2')

            h3 = ops.deconv2d(h2, output_size=[self.batch_size, s_h2, s_w2, self.size * 1], name='g_h3', reuse=reuse)
            h3 = ops.batch_normalizer(h3, train=train, name='g_bn3', reuse=reuse)
            h3 = ops.lrelu(h3, name='g_l3')

            # final transposed conv back to the input resolution; cast input_size to a
            # list so the concatenation also works when input_size is a tuple
            h4 = ops.deconv2d(h3, output_size=[self.batch_size] + list(self.input_size), name='g_h4', reuse=reuse)
            x_generate = tf.nn.tanh(h4, name='g_t4')
            return x_generate
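The listing stops at graph construction; the training objective is not shown. Purely as a hypothetical sketch, assuming TF 1.x, the standard non-saturating sigmoid cross-entropy GAN losses, DCGAN-style Adam hyperparameters (2e-4, beta1=0.5), and a build_losses method added to the same class (so tf and the two methods above are available), the two networks could be wired together like this:

    def build_losses(self, real_images, noise):
        # hypothetical method, not part of the original listing
        fake_images = self.generate_net(noise, train=True)
        d_logits_real = self.discriminator_net(real_images)
        d_logits_fake = self.discriminator_net(fake_images, reuse=True)

        # discriminator: push real logits toward 1 and fake logits toward 0
        d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
        d_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
        # generator: try to make the discriminator output 1 on fake samples
        g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))

        # update each network only through the variables in its own scope
        d_vars = [v for v in tf.trainable_variables() if v.name.startswith('discriminator')]
        g_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator')]
        d_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)
        g_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(g_loss, var_list=g_vars)
        return d_opt, g_opt, d_loss, g_loss

Splitting the variables by scope name is what makes the alternating D/G updates possible; the scope names match the 'discriminator' and 'generator' scopes opened in the two methods above.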