def _create(self):
        """Build the GAN training graph: placeholders, generator forward
        pass, patch-based discriminator scores, losses, Adam train ops,
        and TensorBoard summaries.

        Nothing is returned; every op/tensor that callers need later is
        registered in the default graph under an explicit name
        ('x_images_holder', 'y_images_holder', 'prob_x', 'prob_g',
        'train_g', 'train_d') and is expected to be fetched by name.
        """
        # Placeholders for a paired training batch: x is the input-domain
        # image, y the target-domain image. NHWC layout with a dynamic
        # batch dimension; sizes come from the global config object A.
        x_images_holder = tf.placeholder(
            tf.float32,
            shape=[None, A.img_size, A.img_size, A.input_channels],
            name='x_images_holder')
        y_images_holder = tf.placeholder(
            tf.float32,
            shape=[None, A.img_size, A.img_size, A.output_channels],
            name='y_images_holder')

        # Forward pass. The discriminator is applied twice: first on the
        # real (x, y) pair, which creates its variables, then on the
        # generated pair with reuse=True so both applications share the
        # same weights. The call order is therefore significant.
        G = Generator()
        D = Discriminator()
        generated_images = G.create(x_images_holder)
        Dx = D.score_patches(x_images_holder, y_images_holder)
        Dg = D.score_patches(x_images_holder, generated_images, reuse=True)
        # Mean patch score per batch, exported by name for monitoring.
        prob_x = tf.reduce_mean(Dx, name='prob_x')
        prob_g = tf.reduce_mean(Dg, name='prob_g')

        # Adversarial + L1 losses (see self._loss for the exact formulation).
        loss_d, loss_g, loss_L1 = self._loss(Dx, Dg, y_images_holder,
                                             generated_images)

        # One Adam optimizer per network, each restricted to its own
        # variable scope so the generator and discriminator are trained
        # independently. lr=2e-4, beta1=0.5 are the usual pix2pix settings.
        optimizer_g = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
        optimizer_d = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
        g_vars = tf.trainable_variables(scope='generator')
        d_vars = tf.trainable_variables(scope='discriminator')
        # The returned train ops are fetched later by name, so the locals
        # the original kept here were unused and have been dropped.
        optimizer_g.minimize(loss_g, var_list=g_vars, name='train_g')
        optimizer_d.minimize(loss_d, var_list=d_vars, name='train_d')

        # Summaries register themselves in the graph's summary collection
        # as a side effect; merge_all then bundles them. None of the
        # returned ops were referenced again, so no locals are kept.
        tf.summary.scalar('Discriminator_Loss', loss_d)
        tf.summary.scalar('Generator_Loss', loss_g)
        tf.summary.image('Generated_Image',
                         generated_images,
                         max_outputs=1)
        tf.summary.image('X_Image',
                         x_images_holder,
                         max_outputs=1)
        tf.summary.image('Y_Image',
                         y_images_holder,
                         max_outputs=1)
        tf.summary.scalar('prob_x', prob_x)
        tf.summary.scalar('prob_g', prob_g)
        tf.summary.scalar('loss_L1', loss_L1)
        tf.summary.merge_all()
    def test_discriminator(self):
        """The discriminator should map an image pair to a score map with a
        single channel (one score per patch)."""
        discriminator = Discriminator()

        # Two random batches standing in for an (input, output) image pair,
        # shaped [batch_size, img_size, img_size, output_channels] with
        # values in [-1, 1).
        batch_size = 128
        image_shape = [batch_size, A.img_size, A.img_size, A.output_channels]
        fake_inputs = tf.random_uniform(image_shape, minval=-1, maxval=1)
        fake_outputs = tf.random_uniform(image_shape, minval=-1, maxval=1)

        # Score the pair and verify the channel axis collapsed to width 1.
        patch_scores = discriminator.score_patches(fake_inputs, fake_outputs)
        self.assertEqual(patch_scores.shape[3], 1)