Example #1
    def input_to_feature(self, z):
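        """Map a latent vector z to a 16 x 16 feature map by reusing the first three blocks of the 'g_' generator."""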
        with tf.variable_scope("g_", reuse=True):
            data_flatten = flatten(z)

            # 4 x 4
            h0_linear = tf_utils.linear(data_flatten,
                                        4 * 4 * self.gen_c[0],
                                        name='h0_linear')
            h0_reshape = tf.reshape(
                h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape,
                                               name='h0_batchnorm',
                                               _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 8 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu,
                                          self.gen_c[1],
                                          name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv,
                                               name='h1_batchnorm',
                                               _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 16 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu,
                                          self.gen_c[2],
                                          name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv,
                                               name='h2_batchnorm',
                                               _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            return h2_relu
Example #2
    def discriminator(self, data, name='d_', is_reuse=False):
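        """DCGAN discriminator: four strided conv blocks reduce a 64 x 64 input to 4 x 4, then a linear layer; returns (probability, logit)."""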
        with tf.variable_scope(name) as scope:
            if is_reuse:
                scope.reuse_variables()

            # 64 -> 32
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16
            h1_conv = tf_utils.conv2d(h0_lrelu, self.dis_c[1], name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8
            h2_conv = tf_utils.conv2d(h1_lrelu, self.dis_c[2], name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # 8 -> 4
            h3_conv = tf_utils.conv2d(h2_lrelu, self.dis_c[3], name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            h3_flatten = flatten(h3_lrelu)
            h4_linear = tf_utils.linear(h3_flatten, 1, name='h4_linear')

            return tf.nn.sigmoid(h4_linear), h4_linear
Example #3
    def generator(self, data, name='g_', is_reuse=False):
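        """DCGAN generator: project z to 4 x 4, then four deconv blocks upsample to a 64 x 64 tanh image."""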
        with tf.variable_scope(name) as scope:
            if is_reuse:
                scope.reuse_variables()

            data_flatten = flatten(data)

            # 4 x 4
            h0_linear = tf_utils.linear(data_flatten, 4*4*self.gen_c[0], name='h0_linear')
            h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 8 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu, self.gen_c[1], name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 16 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu, self.gen_c[2], name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 32 x 32
            h3_deconv = tf_utils.deconv2d(h2_relu, self.gen_c[3], name='h3_deconv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
            h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

            # 64 x 64
            output = tf_utils.deconv2d(h3_relu, self.image_size[2], name='h4_deconv2d')
            return tf.nn.tanh(output)
Example #4
    def decoder(self, data, name='dec_'):
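        """Decode a 1024-d code into a tanh volume of self.image_size through a stack of 3-D deconvolutions."""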
        with tf.variable_scope(name):
            h0_linear = tf_utils.linear(data, 5*8*self.dis_c[3], input_size=1024, name='h0_linear')
            h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 1, 5, 8, self.dis_c[3]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 5 x 8 -> 10 x 15
            h1_deconv = tf_utils.deconv3d(h0_relu, self.dis_c[3], output_size=[10, 15, 256], k_t=4, d_t=5, d_h=2, d_w=2, padding_='SAME', name='h1_deconv3d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 10 x 15 -> 20 x 30
            h2_deconv = tf_utils.deconv3d(h1_relu, self.dis_c[2], output_size=[20, 30, 256], stepup_out=1, name='h2_deconv3d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 20 x 30 -> 40 x 60
            h3_deconv = tf_utils.deconv3d(h2_relu, self.dis_c[1], output_size=[40, 60, 128], stepup_out=2, name='h3_deconv3d')
            h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
            h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

            # 40 x 60 -> 79 x 119
            h4_deconv = tf_utils.deconv3d(h3_relu, self.dis_c[0], output_size=[79, 119, 64], stepup_out=2, d_t=1, d_h=2, d_w=2, padding_='SAME', name='h4_deconv3d')
            h4_batchnorm = tf_utils.batch_norm(h4_deconv, name='h4_batchnorm', _ops=self._gen_train_ops)
            h4_relu = tf.nn.relu(h4_batchnorm, name='h4_relu')

            # 79 x 119 -> image size
            output = tf_utils.deconv3d(h4_relu, self.image_size[2], output_size=self.image_size, d_t=1, d_h=2, d_w=2, padding_='SAME', name='h5_deconv3d')
            return tf.nn.tanh(output)
Example #5
    def encoder(self, data, name='enc_'):
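        """Encode an input volume to a 1024-d code with strided 3-D convolutions; returns (sigmoid output, linear output)."""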
        with tf.variable_scope(name):
            # 64 -> 32 or 32 -> 16
            h0_conv = tf_utils.conv3d(data, self.dis_c[0], name='h0_conv3d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16 or 16 -> 8
            h1_conv = tf_utils.conv3d(h0_lrelu, self.dis_c[1], name='h1_conv3d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8 or 8 -> 4
            h2_conv = tf_utils.conv3d(h1_lrelu, self.dis_c[2], name='h2_conv3d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            h3_conv = tf_utils.conv3d(h2_lrelu, self.dis_c[3], name='h3_conv3d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            h4_conv = tf_utils.conv3d(h3_lrelu, self.dis_c[3], k_d=1, k_h=2, k_w=2, name='h4_conv3d')
            h4_batchnorm = tf_utils.batch_norm(h4_conv, name='h4_batchnorm', _ops=self._dis_train_ops)
            h4_lrelu = tf_utils.lrelu(h4_batchnorm, name='h4_lrelu')

            h4_flatten = flatten(h4_lrelu)
            h5_linear = tf_utils.linear(h4_flatten, 1024, name='h5_linear')
            return tf.nn.sigmoid(h5_linear), h5_linear
Example #6
    def generator(self, data, name='g_'):
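        """DCGAN generator that outputs 32 x 32 images for mnist/cifar10 and 64 x 64 images otherwise."""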
        with tf.variable_scope(name):
            data_flatten = flatten(data)

            # 4 x 4
            h0_linear = tf_utils.linear(data_flatten, 4*4*self.gen_c[0], name='h0_linear')
            h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 8 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu, self.gen_c[1], name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 16 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu, self.gen_c[2], name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 32 x 32
            if self.flags.dataset in ['mnist', 'cifar10']:
                output = tf_utils.deconv2d(h2_relu, self.image_size[2], name='h3_deconv2d')
                return tf.nn.tanh(output)
            else:
                h3_deconv = tf_utils.deconv2d(h2_relu, self.gen_c[3], name='h3_deconv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
                h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

                # 64 x 64
                output = tf_utils.deconv2d(h3_relu, self.image_size[2], name='h4_deconv2d')
                return tf.nn.tanh(output)
Example #7
    def discriminator(self, data, name='d_', is_reuse=False):
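        """Discriminator for (128, 256) inputs: six strided conv blocks down to (2, 4), then a linear layer; returns (probability, logit)."""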
        with tf.variable_scope(name) as scope:
            if is_reuse:
                scope.reuse_variables()

            # (128, 256) -> (64, 128)
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # (64, 128) -> (32, 64)
            h1_conv = tf_utils.conv2d(h0_lrelu,
                                      self.dis_c[1],
                                      name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv,
                                               name='h1_batchnorm',
                                               _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # (32, 64) -> (16, 32)
            h2_conv = tf_utils.conv2d(h1_lrelu,
                                      self.dis_c[2],
                                      name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv,
                                               name='h2_batchnorm',
                                               _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # (16, 32) -> (8, 16)
            h3_conv = tf_utils.conv2d(h2_lrelu,
                                      self.dis_c[3],
                                      name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv,
                                               name='h3_batchnorm',
                                               _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            # (8, 16) -> (4, 8)
            h4_conv = tf_utils.conv2d(h3_lrelu,
                                      self.dis_c[4],
                                      name='h4_conv2d')
            h4_batchnorm = tf_utils.batch_norm(h4_conv,
                                               name='h4_batchnorm',
                                               _ops=self._dis_train_ops)
            h4_lrelu = tf_utils.lrelu(h4_batchnorm, name='h4_lrelu')

            # (4, 8) -> (2, 4)
            h5_conv = tf_utils.conv2d(h4_lrelu,
                                      self.dis_c[5],
                                      name='h5_conv2d')
            h5_batchnorm = tf_utils.batch_norm(h5_conv,
                                               name='h5_batchnorm',
                                               _ops=self._dis_train_ops)
            h5_lrelu = tf_utils.lrelu(h5_batchnorm, name='h5_lrelu')

            h5_flatten = flatten(h5_lrelu)
            h6_linear = tf_utils.linear(h5_flatten, 1, name='h6_linear')

            return tf.nn.sigmoid(h6_linear), h6_linear
Example #8
    def __call__(self, x):
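        """PatchGAN discriminator: four strided conv blocks (200 -> 13), then a stride-1 conv emits a 13 x 13 map of per-patch logits."""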
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # 200 -> 100
            h0_conv2d = tf_utils.conv2d(x, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv2d, name='h0_lrelu')

            # 100 -> 50
            h1_conv2d = tf_utils.conv2d(h0_lrelu,
                                        self.dis_c[1],
                                        name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv2d,
                                               name='h1_batchnorm',
                                               _ops=self._ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 50 -> 25
            h2_conv2d = tf_utils.conv2d(h1_lrelu,
                                        self.dis_c[2],
                                        name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv2d,
                                               name='h2_batchnorm',
                                               _ops=self._ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # 25 -> 13
            h3_conv2d = tf_utils.conv2d(h2_lrelu,
                                        self.dis_c[3],
                                        name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv2d,
                                               name='h3_batchnorm',
                                               _ops=self._ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            # Patch GAN: 13 -> 13
            output = tf_utils.conv2d(h3_lrelu,
                                     self.dis_c[4],
                                     k_h=3,
                                     k_w=3,
                                     d_h=1,
                                     d_w=1,
                                     name='output_conv2d')

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
Example #9
    def discriminator(self, data, name='d_', is_reuse=False):
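        """PatchGAN discriminator: four strided conv blocks (256 -> 16), then a stride-1 conv; returns (per-patch probabilities, logits)."""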
        with tf.variable_scope(name) as scope:
            if is_reuse:
                scope.reuse_variables()

            # 256 -> 128
            h0_conv2d = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv2d, name='h0_lrelu')

            # 128 -> 64
            h1_conv2d = tf_utils.conv2d(h0_lrelu,
                                        self.dis_c[1],
                                        name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv2d,
                                               name='h1_batchnorm',
                                               _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 64 -> 32
            h2_conv2d = tf_utils.conv2d(h1_lrelu,
                                        self.dis_c[2],
                                        name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv2d,
                                               name='h2_batchnorm',
                                               _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # 32 -> 16
            h3_conv2d = tf_utils.conv2d(h2_lrelu,
                                        self.dis_c[3],
                                        name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv2d,
                                               name='h3_batchnorm',
                                               _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            # Patch GAN: 16 -> 16
            h4_conv2d = tf_utils.conv2d(h3_lrelu,
                                        self.dis_c[4],
                                        k_h=3,
                                        k_w=3,
                                        d_h=1,
                                        d_w=1,
                                        name='h4_conv2d')

            return tf.nn.sigmoid(h4_conv2d), h4_conv2d
Example #10
    def feature_to_data(self, net):
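        """Map a 16 x 16 feature map to a 64 x 64 tanh image by reusing the last two blocks of the 'g_' generator."""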
        with tf.variable_scope("g_", reuse=True):
            # 32 x 32
            h3_deconv = tf_utils.deconv2d(net,
                                          self.gen_c[3],
                                          name='h3_deconv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_deconv,
                                               name='h3_batchnorm',
                                               _ops=self._gen_train_ops)
            h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

            # 64 x 64
            output = tf_utils.deconv2d(h3_relu,
                                       self.image_size[2],
                                       name='h4_deconv2d')
            return tf.nn.tanh(output)
Example #11
    def generator(self, data, name='g_'):
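        """Generator for (128, 256) outputs: project z to 2 x 4, then six deconvolutions upsample to a tanh image."""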
        with tf.variable_scope(name):
            data_flatten = flatten(data)

            # 2 x 4
            h0_linear = tf_utils.linear(data_flatten,
                                        2 * 4 * self.gen_c[0],
                                        name='h0_linear')
            h0_reshape = tf.reshape(
                h0_linear, [tf.shape(h0_linear)[0], 2, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape,
                                               name='h0_batchnorm',
                                               _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 4 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu,
                                          self.gen_c[1],
                                          name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv,
                                               name='h1_batchnorm',
                                               _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 8 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu,
                                          self.gen_c[2],
                                          name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv,
                                               name='h2_batchnorm',
                                               _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 16 x 32
            h3_deconv = tf_utils.deconv2d(h2_relu,
                                          self.gen_c[3],
                                          name='h3_deconv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_deconv,
                                               name='h3_batchnorm',
                                               _ops=self._gen_train_ops)
            h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

            # 32 x 64
            h4_deconv = tf_utils.deconv2d(h3_relu,
                                          self.gen_c[4],
                                          name='h4_deconv2d')
            h4_batchnorm = tf_utils.batch_norm(h4_deconv,
                                               name='h4_batchnorm',
                                               _ops=self._gen_train_ops)
            h4_relu = tf.nn.relu(h4_batchnorm, name='h4_relu')

            # 64 x 128
            h5_deconv = tf_utils.deconv2d(h4_relu,
                                          self.gen_c[5],
                                          name='h5_deconv2d')
            h5_batchnorm = tf_utils.batch_norm(h5_deconv,
                                               name='h5_batchnorm',
                                               _ops=self._gen_train_ops)
            h5_relu = tf.nn.relu(h5_batchnorm, name='h5_relu')

            # 128 x 256
            output = tf_utils.deconv2d(h5_relu,
                                       self.image_size[2],
                                       name='h6_deconv2d')
            return tf.nn.tanh(output)
Example #12
    def __call__(self, x):
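        """U-Net generator for (300, 200) inputs: an 8-step conv encoder, then a deconv decoder with skip connections, cropping odd sizes via tf.split."""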
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (300, 200) -> (150, 100)
            e0_conv2d = tf_utils.conv2d(x, self.gen_c[0], name='e0_conv2d')
            e0_lrelu = tf_utils.lrelu(e0_conv2d, name='e0_lrelu')

            # (150, 100) -> (75, 50)
            e1_conv2d = tf_utils.conv2d(e0_lrelu,
                                        self.gen_c[1],
                                        name='e1_conv2d')
            e1_batchnorm = tf_utils.batch_norm(e1_conv2d,
                                               name='e1_batchnorm',
                                               _ops=self._ops)
            e1_lrelu = tf_utils.lrelu(e1_batchnorm, name='e1_lrelu')

            # (75, 50) -> (38, 25)
            e2_conv2d = tf_utils.conv2d(e1_lrelu,
                                        self.gen_c[2],
                                        name='e2_conv2d')
            e2_batchnorm = tf_utils.batch_norm(e2_conv2d,
                                               name='e2_batchnorm',
                                               _ops=self._ops)
            e2_lrelu = tf_utils.lrelu(e2_batchnorm, name='e2_lrelu')

            # (38, 25) -> (19, 13)
            e3_conv2d = tf_utils.conv2d(e2_lrelu,
                                        self.gen_c[3],
                                        name='e3_conv2d')
            e3_batchnorm = tf_utils.batch_norm(e3_conv2d,
                                               name='e3_batchnorm',
                                               _ops=self._ops)
            e3_lrelu = tf_utils.lrelu(e3_batchnorm, name='e3_lrelu')

            # (19, 13) -> (10, 7)
            e4_conv2d = tf_utils.conv2d(e3_lrelu,
                                        self.gen_c[4],
                                        name='e4_conv2d')
            e4_batchnorm = tf_utils.batch_norm(e4_conv2d,
                                               name='e4_batchnorm',
                                               _ops=self._ops)
            e4_lrelu = tf_utils.lrelu(e4_batchnorm, name='e4_lrelu')

            # (10, 7) -> (5, 4)
            e5_conv2d = tf_utils.conv2d(e4_lrelu,
                                        self.gen_c[5],
                                        name='e5_conv2d')
            e5_batchnorm = tf_utils.batch_norm(e5_conv2d,
                                               name='e5_batchnorm',
                                               _ops=self._ops)
            e5_lrelu = tf_utils.lrelu(e5_batchnorm, name='e5_lrelu')

            # (5, 4) -> (3, 2)
            e6_conv2d = tf_utils.conv2d(e5_lrelu,
                                        self.gen_c[6],
                                        name='e6_conv2d')
            e6_batchnorm = tf_utils.batch_norm(e6_conv2d,
                                               name='e6_batchnorm',
                                               _ops=self._ops)
            e6_lrelu = tf_utils.lrelu(e6_batchnorm, name='e6_lrelu')

            # (3, 2) -> (2, 1)
            e7_conv2d = tf_utils.conv2d(e6_lrelu,
                                        self.gen_c[7],
                                        name='e7_conv2d')
            e7_batchnorm = tf_utils.batch_norm(e7_conv2d,
                                               name='e7_batchnorm',
                                               _ops=self._ops)
            e7_relu = tf_utils.relu(e7_batchnorm, name='e7_relu')

            # (2, 1) -> (4, 2)
            d0_deconv = tf_utils.deconv2d(e7_relu,
                                          self.gen_c[8],
                                          name='d0_deconv2d')
            shapeA = e6_conv2d.get_shape().as_list()[1]
            shapeB = d0_deconv.get_shape().as_list()[1] - shapeA
            # (4, 2) -> (3, 2)
            d0_split, _ = tf.split(d0_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d0_split')
            tf_utils.print_activations(d0_split)
            d0_batchnorm = tf_utils.batch_norm(d0_split,
                                               name='d0_batchnorm',
                                               _ops=self._ops)
            d0_drop = tf.nn.dropout(d0_batchnorm,
                                    keep_prob=0.5,
                                    name='d0_dropout')
            d0_concat = tf.concat([d0_drop, e6_batchnorm],
                                  axis=3,
                                  name='d0_concat')
            d0_relu = tf_utils.relu(d0_concat, name='d0_relu')

            # (3, 2) -> (6, 4)
            d1_deconv = tf_utils.deconv2d(d0_relu,
                                          self.gen_c[9],
                                          name='d1_deconv2d')
            # (6, 4) -> (5, 4)
            shapeA = e5_batchnorm.get_shape().as_list()[1]
            shapeB = d1_deconv.get_shape().as_list()[1] - shapeA
            d1_split, _ = tf.split(d1_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d1_split')
            tf_utils.print_activations(d1_split)
            d1_batchnorm = tf_utils.batch_norm(d1_split,
                                               name='d1_batchnorm',
                                               _ops=self._ops)
            d1_drop = tf.nn.dropout(d1_batchnorm,
                                    keep_prob=0.5,
                                    name='d1_dropout')
            d1_concat = tf.concat([d1_drop, e5_batchnorm],
                                  axis=3,
                                  name='d1_concat')
            d1_relu = tf_utils.relu(d1_concat, name='d1_relu')

            # (5, 4) -> (10, 8)
            d2_deconv = tf_utils.deconv2d(d1_relu,
                                          self.gen_c[10],
                                          name='d2_deconv2d')
            # (10, 8) -> (10, 7)
            shapeA = e4_batchnorm.get_shape().as_list()[2]
            shapeB = d2_deconv.get_shape().as_list()[2] - shapeA
            d2_split, _ = tf.split(d2_deconv, [shapeA, shapeB],
                                   axis=2,
                                   name='d2_split')
            tf_utils.print_activations(d2_split)
            d2_batchnorm = tf_utils.batch_norm(d2_split,
                                               name='d2_batchnorm',
                                               _ops=self._ops)
            d2_drop = tf.nn.dropout(d2_batchnorm,
                                    keep_prob=0.5,
                                    name='d2_dropout')
            d2_concat = tf.concat([d2_drop, e4_batchnorm],
                                  axis=3,
                                  name='d2_concat')
            d2_relu = tf_utils.relu(d2_concat, name='d2_relu')

            # (10, 7) -> (20, 14)
            d3_deconv = tf_utils.deconv2d(d2_relu,
                                          self.gen_c[11],
                                          name='d3_deconv2d')
            # (20, 14) -> (19, 14)
            shapeA = e3_batchnorm.get_shape().as_list()[1]
            shapeB = d3_deconv.get_shape().as_list()[1] - shapeA
            d3_split_1, _ = tf.split(d3_deconv, [shapeA, shapeB],
                                     axis=1,
                                     name='d3_split_1')
            tf_utils.print_activations(d3_split_1)
            # (19, 14) -> (19, 13)
            shapeA = e3_batchnorm.get_shape().as_list()[2]
            shapeB = d3_split_1.get_shape().as_list()[2] - shapeA
            d3_split_2, _ = tf.split(d3_split_1, [shapeA, shapeB],
                                     axis=2,
                                     name='d3_split_2')
            tf_utils.print_activations(d3_split_2)
            d3_batchnorm = tf_utils.batch_norm(d3_split_2,
                                               name='d3_batchnorm',
                                               _ops=self._ops)
            d3_concat = tf.concat([d3_batchnorm, e3_batchnorm],
                                  axis=3,
                                  name='d3_concat')
            d3_relu = tf_utils.relu(d3_concat, name='d3_relu')

            # (19, 13) -> (38, 26)
            d4_deconv = tf_utils.deconv2d(d3_relu,
                                          self.gen_c[12],
                                          name='d4_deconv2d')
            # (38, 26) -> (38, 25)
            shapeA = e2_batchnorm.get_shape().as_list()[2]
            shapeB = d4_deconv.get_shape().as_list()[2] - shapeA
            d4_split, _ = tf.split(d4_deconv, [shapeA, shapeB],
                                   axis=2,
                                   name='d4_split')
            tf_utils.print_activations(d4_split)
            d4_batchnorm = tf_utils.batch_norm(d4_split,
                                               name='d4_batchnorm',
                                               _ops=self._ops)
            d4_concat = tf.concat([d4_batchnorm, e2_batchnorm],
                                  axis=3,
                                  name='d4_concat')
            d4_relu = tf_utils.relu(d4_concat, name='d4_relu')

            # (38, 25) -> (76, 50)
            d5_deconv = tf_utils.deconv2d(d4_relu,
                                          self.gen_c[13],
                                          name='d5_deconv2d')
            # (76, 50) -> (75, 50)
            shapeA = e1_batchnorm.get_shape().as_list()[1]
            shapeB = d5_deconv.get_shape().as_list()[1] - shapeA
            d5_split, _ = tf.split(d5_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d5_split')
            tf_utils.print_activations(d5_split)
            d5_batchnorm = tf_utils.batch_norm(d5_split,
                                               name='d5_batchnorm',
                                               _ops=self._ops)
            d5_concat = tf.concat([d5_batchnorm, e1_batchnorm],
                                  axis=3,
                                  name='d5_concat')
            d5_relu = tf_utils.relu(d5_concat, name='d5_relu')

            # (75, 50) -> (150, 100)
            d6_deconv = tf_utils.deconv2d(d5_relu,
                                          self.gen_c[14],
                                          name='d6_deconv2d')
            d6_batchnorm = tf_utils.batch_norm(d6_deconv,
                                               name='d6_batchnorm',
                                               _ops=self._ops)
            d6_concat = tf.concat([d6_batchnorm, e0_conv2d],
                                  axis=3,
                                  name='d6_concat')
            d6_relu = tf_utils.relu(d6_concat, name='d6_relu')

            # (150, 100) -> (300, 200)
            d7_deconv = tf_utils.deconv2d(d6_relu,
                                          self.gen_c[15],
                                          name='d7_deconv2d')
            output = tf_utils.tanh(d7_deconv, name='output_tanh')

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
Example #13
    def generator(self, data, y, name='g_', is_reuse=False):
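        """DCGAN generator; when y_dim is set, the label y is concatenated to z and to every intermediate feature map (conditional GAN)."""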
        with tf.variable_scope(name) as scope:
            if is_reuse:
                scope.reuse_variables()

            data_flatten = flatten(data)

            if not self.flags.y_dim:
                # 4 x 4
                h0_linear = tf_utils.linear(data_flatten, 4 * 4 * self.gen_c[0], name='h0_linear')
                h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
                h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
                h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

                # 8 x 8
                h1_deconv = tf_utils.deconv2d(h0_relu, self.gen_c[1], name='h1_deconv2d')
                h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
                h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

                # 16 x 16
                h2_deconv = tf_utils.deconv2d(h1_relu, self.gen_c[2], name='h2_deconv2d')
                h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
                h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

                # 32 x 32
                h3_deconv = tf_utils.deconv2d(h2_relu, self.gen_c[3], name='h3_deconv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
                h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

                # 64 x 64
                output = tf_utils.deconv2d(h3_relu, self.image_size[2], name='h4_deconv2d')
                return tf.nn.tanh(output)

            else:
                yb = tf.reshape(y, [tf.shape(y)[0], 1, 1, self.flags.y_dim])
                z = concat([data_flatten, y], 1)

                # 4 x 4
                h0_linear = tf_utils.linear(z, 4 * 4 * self.gen_c[0], name='h0_linear')
                h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
                h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
                h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

                h0 = conv_cond_concat(h0_relu, yb)

                # 8 x 8
                h1_deconv = tf_utils.deconv2d(h0, self.gen_c[1], name='h1_deconv2d')
                h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
                h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')
                h1 = conv_cond_concat(h1_relu, yb)

                # 16 x 16
                h2_deconv = tf_utils.deconv2d(h1, self.gen_c[2], name='h2_deconv2d')
                h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
                h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')
                h2 = conv_cond_concat(h2_relu, yb)

                # 32 x 32
                h3_deconv = tf_utils.deconv2d(h2, self.gen_c[3], name='h3_deconv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
                h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')
                h3 = conv_cond_concat(h3_relu, yb)

                # 64 x 64
                output = tf_utils.deconv2d(h3, self.image_size[2], name='h4_deconv2d')
                return tf.nn.tanh(output)
Example #14
    def discriminator(self, data, y, name='d_', is_reuse=False):
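        """DCGAN discriminator; when y_dim is set, the label y is concatenated to the input and to intermediate feature maps (conditional GAN)."""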
        with tf.variable_scope(name) as scope:
            if is_reuse:
                scope.reuse_variables()

            if not self.flags.y_dim:
                # 64 -> 32
                h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
                h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

                # 32 -> 16
                h1_conv = tf_utils.conv2d(h0_lrelu, self.dis_c[1], name='h1_conv2d')
                h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
                h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

                # 16 -> 8
                h2_conv = tf_utils.conv2d(h1_lrelu, self.dis_c[2], name='h2_conv2d')
                h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
                h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

                # 8 -> 4
                h3_conv = tf_utils.conv2d(h2_lrelu, self.dis_c[3], name='h3_conv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
                h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

                h3_flatten = flatten(h3_lrelu)
                h4_linear = tf_utils.linear(h3_flatten, 1, name='h4_linear')

                return tf.nn.sigmoid(h4_linear), h4_linear

            else:
                yb = tf.reshape(y, [tf.shape(y)[0], 1, 1, self.flags.y_dim])
                x = conv_cond_concat(data, yb)

                # 64 -> 32
                h0_conv = tf_utils.conv2d(x, self.dis_c[0] + self.flags.y_dim, name='h0_conv2d')
                h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')
                h0 = conv_cond_concat(h0_lrelu, yb)

                # 32 -> 16
                h1_conv = tf_utils.conv2d(h0, self.dis_c[1] + self.flags.y_dim, name='h1_conv2d')
                h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
                h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')
                h1 = conv_cond_concat(h1_lrelu, yb)

                # 16 -> 8
                h2_conv = tf_utils.conv2d(h1, self.dis_c[2], name='h2_conv2d')
                h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
                h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')
                h2 = conv_cond_concat(h2_lrelu, yb)

                # 8 -> 4
                h3_conv = tf_utils.conv2d(h2, self.dis_c[3], name='h3_conv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
                h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

                h3_flatten = flatten(h3_lrelu)
                h3 = concat([h3_flatten, y], 1)

                h4_linear = tf_utils.linear(h3, 1, name='h4_linear')

                return tf.nn.sigmoid(h4_linear), h4_linear
Example #15
    def generator(self, data, name='g_'):
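        """U-Net (pix2pix-style) generator for 256 x 256 images: an 8-step encoder, a decoder with skip connections and dropout, and a tanh output."""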
        with tf.variable_scope(name):
            # 256 -> 128
            e0_conv2d = tf_utils.conv2d(data, self.gen_c[0], name='e0_conv2d')
            e0_lrelu = tf_utils.lrelu(e0_conv2d, name='e0_lrelu')

            # 128 -> 64
            e1_conv2d = tf_utils.conv2d(e0_lrelu,
                                        self.gen_c[1],
                                        name='e1_conv2d')
            e1_batchnorm = tf_utils.batch_norm(e1_conv2d,
                                               name='e1_batchnorm',
                                               _ops=self._gen_train_ops)
            e1_lrelu = tf_utils.lrelu(e1_batchnorm, name='e1_lrelu')

            # 64 -> 32
            e2_conv2d = tf_utils.conv2d(e1_lrelu,
                                        self.gen_c[2],
                                        name='e2_conv2d')
            e2_batchnorm = tf_utils.batch_norm(e2_conv2d,
                                               name='e2_batchnorm',
                                               _ops=self._gen_train_ops)
            e2_lrelu = tf_utils.lrelu(e2_batchnorm, name='e2_lrelu')

            # 32 -> 16
            e3_conv2d = tf_utils.conv2d(e2_lrelu,
                                        self.gen_c[3],
                                        name='e3_conv2d')
            e3_batchnorm = tf_utils.batch_norm(e3_conv2d,
                                               name='e3_batchnorm',
                                               _ops=self._gen_train_ops)
            e3_lrelu = tf_utils.lrelu(e3_batchnorm, name='e3_lrelu')

            # 16 -> 8
            e4_conv2d = tf_utils.conv2d(e3_lrelu,
                                        self.gen_c[4],
                                        name='e4_conv2d')
            e4_batchnorm = tf_utils.batch_norm(e4_conv2d,
                                               name='e4_batchnorm',
                                               _ops=self._gen_train_ops)
            e4_lrelu = tf_utils.lrelu(e4_batchnorm, name='e4_lrelu')

            # 8 -> 4
            e5_conv2d = tf_utils.conv2d(e4_lrelu,
                                        self.gen_c[5],
                                        name='e5_conv2d')
            e5_batchnorm = tf_utils.batch_norm(e5_conv2d,
                                               name='e5_batchnorm',
                                               _ops=self._gen_train_ops)
            e5_lrelu = tf_utils.lrelu(e5_batchnorm, name='e5_lrelu')

            # 4 -> 2
            e6_conv2d = tf_utils.conv2d(e5_lrelu,
                                        self.gen_c[6],
                                        name='e6_conv2d')
            e6_batchnorm = tf_utils.batch_norm(e6_conv2d,
                                               name='e6_batchnorm',
                                               _ops=self._gen_train_ops)
            e6_lrelu = tf_utils.lrelu(e6_batchnorm, name='e6_lrelu')

            # 2 -> 1
            e7_conv2d = tf_utils.conv2d(e6_lrelu,
                                        self.gen_c[7],
                                        name='e7_conv2d')
            e7_batchnorm = tf_utils.batch_norm(e7_conv2d,
                                               name='e7_batchnorm',
                                               _ops=self._gen_train_ops)
            e7_relu = tf.nn.relu(e7_batchnorm, name='e7_relu')

            # 1 -> 2
            d0_deconv = tf_utils.deconv2d(e7_relu,
                                          self.gen_c[8],
                                          name='d0_deconv2d')
            d0_batchnorm = tf_utils.batch_norm(d0_deconv,
                                               name='d0_batchnorm',
                                               _ops=self._gen_train_ops)
            d0_drop = tf.nn.dropout(d0_batchnorm,
                                    keep_prob=0.5,
                                    name='d0_dropout')
            d0_concat = tf.concat([d0_drop, e6_batchnorm],
                                  axis=3,
                                  name='d0_concat')
            d0_relu = tf.nn.relu(d0_concat, name='d0_relu')

            # 2 -> 4
            d1_deconv = tf_utils.deconv2d(d0_relu,
                                          self.gen_c[9],
                                          name='d1_deconv2d')
            d1_batchnorm = tf_utils.batch_norm(d1_deconv,
                                               name='d1_batchnorm',
                                               _ops=self._gen_train_ops)
            d1_drop = tf.nn.dropout(d1_batchnorm,
                                    keep_prob=0.5,
                                    name='d1_dropout')
            d1_concat = tf.concat([d1_drop, e5_batchnorm],
                                  axis=3,
                                  name='d1_concat')
            d1_relu = tf.nn.relu(d1_concat, name='d1_relu')

            # 4 -> 8
            d2_deconv = tf_utils.deconv2d(d1_relu,
                                          self.gen_c[10],
                                          name='d2_deconv2d')
            d2_batchnorm = tf_utils.batch_norm(d2_deconv,
                                               name='d2_batchnorm',
                                               _ops=self._gen_train_ops)
            d2_drop = tf.nn.dropout(d2_batchnorm,
                                    keep_prob=0.5,
                                    name='d2_dropout')
            d2_concat = tf.concat([d2_drop, e4_batchnorm],
                                  axis=3,
                                  name='d2_concat')
            d2_relu = tf.nn.relu(d2_concat, name='d2_relu')

            # 8 -> 16
            d3_deconv = tf_utils.deconv2d(d2_relu,
                                          self.gen_c[11],
                                          name='d3_deconv2d')
            d3_batchnorm = tf_utils.batch_norm(d3_deconv,
                                               name='d3_batchnorm',
                                               _ops=self._gen_train_ops)
            d3_concat = tf.concat([d3_batchnorm, e3_batchnorm],
                                  axis=3,
                                  name='d3_concat')
            d3_relu = tf.nn.relu(d3_concat, name='d3_relu')

            # 16 -> 32
            d4_deconv = tf_utils.deconv2d(d3_relu,
                                          self.gen_c[12],
                                          name='d4_deconv2d')
            d4_batchnorm = tf_utils.batch_norm(d4_deconv,
                                               name='d4_batchnorm',
                                               _ops=self._gen_train_ops)
            d4_concat = tf.concat([d4_batchnorm, e2_batchnorm],
                                  axis=3,
                                  name='d4_concat')
            d4_relu = tf.nn.relu(d4_concat, name='d4_relu')

            # 32 -> 64
            d5_deconv = tf_utils.deconv2d(d4_relu,
                                          self.gen_c[13],
                                          name='d5_deconv2d')
            d5_batchnorm = tf_utils.batch_norm(d5_deconv,
                                               name='d5_batchnorm',
                                               _ops=self._gen_train_ops)
            d5_concat = tf.concat([d5_batchnorm, e1_batchnorm],
                                  axis=3,
                                  name='d5_concat')
            d5_relu = tf.nn.relu(d5_concat, name='d5_relu')

            # 64 -> 128
            d6_deconv = tf_utils.deconv2d(d5_relu,
                                          self.gen_c[14],
                                          name='d6_deconv2d')
            d6_batchnorm = tf_utils.batch_norm(d6_deconv,
                                               name='d6_batchnorm',
                                               _ops=self._gen_train_ops)
            d6_concat = tf.concat([d6_batchnorm, e0_conv2d],
                                  axis=3,
                                  name='d6_concat')
            d6_relu = tf.nn.relu(d6_concat, name='d6_relu')

            # 128 -> 256
            d7_deconv = tf_utils.deconv2d(d6_relu,
                                          self.gen_c[15],
                                          name='d7_deconv2d')

            return tf.nn.tanh(d7_deconv)