def discriminator_image(self, data, name='d_', is_reuse=False):
        """Image-level discriminator: conv stack with max-pooling, then a
        global average pool and a linear layer producing one logit per image.

        Args:
            data: input image tensor; the shape comments below assume
                (N, 512, 512, 1) -- TODO confirm against caller.
            name: variable scope holding the discriminator weights.
            is_reuse: when True, reuse variables of a previous call with the
                same scope (weight sharing between the real and fake branches).

        Returns:
            Tuple (sigmoid probability, raw logit), each of shape (N, 1).
        """
        with tf.variable_scope(name) as scope:
            if is_reuse:  # idiomatic truthiness test instead of `is True`
                scope.reuse_variables()

            # conv1: (N, 512, 512, 1) -> (N, 128, 128, 64)  [stride-2 conv + 2x2 pool]
            conv1 = tf_utils.conv2d(data, self.dis_c, k_h=3, k_w=3, d_h=2, d_w=2, name='conv1_conv1')
            conv1 = tf_utils.batch_norm(conv1, name='conv1_batch1', _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu1')
            conv1 = tf_utils.conv2d(conv1, self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv1_conv2')
            conv1 = tf_utils.batch_norm(conv1, name='conv1_batch2', _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu2')
            pool1 = tf_utils.max_pool_2x2(conv1, name='maxpool1')

            # conv2: (N, 128, 128, 64) -> (N, 32, 32, 128)  [stride-2 conv + 2x2 pool]
            conv2 = tf_utils.conv2d(pool1, 2*self.dis_c, k_h=3, k_w=3, d_h=2, d_w=2, name='conv2_conv1')
            conv2 = tf_utils.batch_norm(conv2, name='conv2_batch1', _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu1')
            conv2 = tf_utils.conv2d(conv2, 2*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv2_conv2')
            conv2 = tf_utils.batch_norm(conv2, name='conv2_batch2', _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu2')
            pool2 = tf_utils.max_pool_2x2(conv2, name='maxpool2')

            # conv3: (N, 32, 32, 128) -> (N, 16, 16, 256)  [stride-1 convs + 2x2 pool]
            conv3 = tf_utils.conv2d(pool2, 4*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv3_conv1')
            conv3 = tf_utils.batch_norm(conv3, name='conv3_batch1', _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu1')
            conv3 = tf_utils.conv2d(conv3, 4*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv3_conv2')
            conv3 = tf_utils.batch_norm(conv3, name='conv3_batch2', _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu2')
            pool3 = tf_utils.max_pool_2x2(conv3, name='maxpool3')

            # conv4: (N, 16, 16, 256) -> (N, 8, 8, 512)  [stride-1 convs + 2x2 pool]
            conv4 = tf_utils.conv2d(pool3, 8*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv4_conv1')
            conv4 = tf_utils.batch_norm(conv4, name='conv4_batch1', _ops=self._dis_train_ops)
            conv4 = tf.nn.relu(conv4, name='conv4_relu1')
            conv4 = tf_utils.conv2d(conv4, 8*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv4_conv2')
            conv4 = tf_utils.batch_norm(conv4, name='conv4_batch2', _ops=self._dis_train_ops)
            conv4 = tf.nn.relu(conv4, name='conv4_relu2')
            pool4 = tf_utils.max_pool_2x2(conv4, name='maxpool4')

            # conv5: (N, 8, 8, 512) -> (N, 8, 8, 1024)  [no pooling here]
            conv5 = tf_utils.conv2d(pool4, 16*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv5_conv1')
            conv5 = tf_utils.batch_norm(conv5, name='conv5_batch1', _ops=self._dis_train_ops)
            conv5 = tf.nn.relu(conv5, name='conv5_relu1')
            conv5 = tf_utils.conv2d(conv5, 16*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv5_conv2')
            conv5 = tf_utils.batch_norm(conv5, name='conv5_batch2', _ops=self._dis_train_ops)
            conv5 = tf.nn.relu(conv5, name='conv5_relu2')

            # output layer: (N, 8, 8, 1024) -> (N, 1, 1, 1024) -> (N, 1)
            shape = conv5.get_shape().as_list()
            # Generalized to non-square feature maps: pool over (H, W) rather
            # than assuming H == W.  For square inputs this is identical to the
            # previous pool_size=shape[1] behavior.
            # NOTE(review): the misspelled op name 'global_vaerage_pool' is kept
            # on purpose so existing graph/node names stay stable.
            gap = tf.layers.average_pooling2d(inputs=conv5, pool_size=(shape[1], shape[2]),
                                              strides=1, padding='VALID',
                                              name='global_vaerage_pool')
            gap_flatten = tf.reshape(gap, [-1, 16*self.dis_c])
            output = tf_utils.linear(gap_flatten, 1, name='linear_output')

            return tf.nn.sigmoid(output), output
    def discriminator_patch2(self, data, name='d_', is_reuse=False):
        """Patch-level discriminator: two downsampling stages followed by
        stride-1 convs and a 1x1 conv that yields a per-patch logit map.

        Args:
            data: input image tensor; the shape comments below assume
                (N, 512, 512, 1) -- TODO confirm against caller.
            name: variable scope holding the discriminator weights.
            is_reuse: when True, reuse variables of a previous call with the
                same scope (weight sharing between the real and fake branches).

        Returns:
            Tuple (sigmoid map, raw logit map), each of shape (N, 64, 64, 1)
            for the assumed 512x512 input.
        """
        with tf.variable_scope(name) as scope:
            if is_reuse:  # idiomatic truthiness test instead of `is True`
                scope.reuse_variables()

            # conv1: (N, 512, 512, 1) -> (N, 128, 128, 64)  [stride-2 conv + 2x2 pool]
            conv1 = tf_utils.conv2d(data, self.dis_c, k_h=3, k_w=3, d_h=2, d_w=2, name='conv1_conv1')
            conv1 = tf_utils.batch_norm(conv1, name='conv1_batch1', _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu1')
            conv1 = tf_utils.conv2d(conv1, self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv1_conv2')
            conv1 = tf_utils.batch_norm(conv1, name='conv1_batch2', _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu2')
            pool1 = tf_utils.max_pool_2x2(conv1, name='maxpool1')

            # conv2: (N, 128, 128, 64) -> (N, 64, 64, 128)
            # (stride-1 convs keep 128x128; the 2x2 max-pool halves to 64x64)
            conv2 = tf_utils.conv2d(pool1, 2*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv2_conv1')
            conv2 = tf_utils.batch_norm(conv2, name='conv2_batch1', _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu1')
            conv2 = tf_utils.conv2d(conv2, 2*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv2_conv2')
            conv2 = tf_utils.batch_norm(conv2, name='conv2_batch2', _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu2')
            pool2 = tf_utils.max_pool_2x2(conv2, name='maxpool2')

            # conv3: (N, 64, 64, 128) -> (N, 64, 64, 256)  [stride-1, no pooling]
            conv3 = tf_utils.conv2d(pool2, 4*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv3_conv1')
            conv3 = tf_utils.batch_norm(conv3, name='conv3_batch1', _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu1')
            conv3 = tf_utils.conv2d(conv3, 4*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv3_conv2')
            conv3 = tf_utils.batch_norm(conv3, name='conv3_batch2', _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu2')

            # output layer: (N, 64, 64, 256) -> (N, 64, 64, 1) via 1x1 conv
            output = tf_utils.conv2d(conv3, 1, k_h=1, k_w=1, d_h=1, d_w=1, name='conv_output')

            return tf.nn.sigmoid(output), output
    def generator(self, data, name='g_'):
        """Encoder / residual-bottleneck / decoder generator with U-Net style
        skip connections and a tanh output.

        Args:
            data: input image tensor; the shape comments below assume
                (1, 512, 512, 1) -- the deconv output shapes hard-code batch
                size 1, so this presumably only supports batch 1; verify
                against caller.
            name: variable scope holding the generator weights.

        Returns:
            Tanh-activated output tensor of shape (1, 512, 512, 1).
        """
        with tf.variable_scope(name):
            # --- Encoder ---------------------------------------------------
            # (512, 512, 1) -> (512, 512, 64)
            enc1 = tf_utils.conv2d(data, self.gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv1_conv1')
            enc1 = tf_utils.lrelu(enc1, name='conv1_lrelu1')

            # (512, 512, 64) -> (256, 256, 128)
            enc2 = tf_utils.conv2d(enc1, 2 * self.gen_c, k_h=3, k_w=3, d_h=2, d_w=2, name='conv2_conv1')
            enc2 = tf_utils.batch_norm(enc2, name='conv2_batch1', _ops=self._gen_train_ops)
            enc2 = tf_utils.lrelu(enc2, name='conv2_lrelu1')

            # (256, 256, 128) -> (128, 128, 256)
            enc3 = tf_utils.conv2d(enc2, 4 * self.gen_c, k_h=3, k_w=3, d_h=2, d_w=2, name='conv3_conv1')
            enc3 = tf_utils.batch_norm(enc3, name='conv3_batch1', _ops=self._gen_train_ops)
            enc3 = tf_utils.lrelu(enc3, name='conv3_lrelu1')

            # --- Bottleneck: nine residual blocks, (128, 128, 256) preserved
            feat = enc3
            for idx in range(1, 10):
                feat = tf_utils.residual_block(feat, 4 * self.gen_c, k_h=3, k_w=3,
                                               d_h=1, d_w=1, stddev=0.02,
                                               name_prefix='residual_block_%d_' % idx,
                                               _ops=self._gen_train_ops)

            # --- Decoder with skip concatenations --------------------------
            # (128, 128, 256) -> (256, 256, 128), then concat enc2 -> 256 ch
            dec1 = tf_utils.deconv2d(feat, [1, 256, 256, 128], k_h=3, k_w=3, d_h=2, d_w=2, name='deconv1_deconv1')
            dec1 = tf_utils.batch_norm(dec1, name='deconv1_batch1', _ops=self._gen_train_ops)
            dec1 = tf.nn.relu(dec1, name='deconv1_relu1')
            dec1 = tf.concat([dec1, enc2], axis=3, name='deconv1_concat1')

            # (256, 256, 256) -> (512, 512, 64), then concat enc1 -> 128 ch
            dec2 = tf_utils.deconv2d(dec1, [1, 512, 512, 64], k_h=3, k_w=3, d_h=2, d_w=2, name='deconv2_deconv1')
            dec2 = tf_utils.batch_norm(dec2, name='deconv2_batch1', _ops=self._gen_train_ops)
            dec2 = tf.nn.relu(dec2, name='deconv2_relu1')
            dec2 = tf.concat([dec2, enc1], axis=3, name='deconv2_concat1')

            # 1x1 projection to a single channel, squashed to [-1, 1]
            out = tf_utils.deconv2d(dec2, [1, 512, 512, 1], k_h=1, k_w=1, d_h=1, d_w=1, name='deconv3_deconv1')
            out = tf.nn.tanh(out, name='deconv3_tanh1')

        return out
    def discriminator_pixel(self, data, name='d_', is_reuse=False):
        """Pixel-level discriminator: three stride-1 convs then a 1x1 conv,
        producing one logit per input pixel (no spatial downsampling).

        Args:
            data: input image tensor; the shape comments below assume
                (N, 512, 512, 1) -- TODO confirm against caller.
            name: variable scope holding the discriminator weights.
            is_reuse: when True, reuse variables of a previous call with the
                same scope (weight sharing between the real and fake branches).

        Returns:
            Tuple (sigmoid map, raw logit map), each of shape (N, H, W, 1).
        """
        with tf.variable_scope(name) as scope:
            if is_reuse:  # idiomatic truthiness test instead of `is True`
                scope.reuse_variables()

            # conv1: (N, 512, 512, 1) -> (N, 512, 512, 64)
            conv1 = tf_utils.conv2d(data, self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv1_conv1')
            conv1 = tf_utils.lrelu(conv1, name='conv1_lrelu1')

            # conv2: (N, 512, 512, 64) -> (N, 512, 512, 128)
            conv2 = tf_utils.conv2d(conv1, 2*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv2_conv1')
            # explicit op name added for consistency with conv1 (activations
            # carry no variables, so this does not affect checkpoints)
            conv2 = tf_utils.lrelu(conv2, name='conv2_lrelu1')

            # conv3: (N, 512, 512, 128) -> (N, 512, 512, 256)
            conv3 = tf_utils.conv2d(conv2, 4*self.dis_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv3_conv1')
            conv3 = tf_utils.lrelu(conv3, name='conv3_lrelu1')

            # output layer: (N, 512, 512, 256) -> (N, 512, 512, 1) via 1x1 conv
            output = tf_utils.conv2d(conv3, 1, k_h=1, k_w=1, d_h=1, d_w=1, name='conv_output')

            return tf.nn.sigmoid(output), output