def test(self, X_data, gt, mask, k=0):
        """Restore the best checkpoint and run evaluation on test data.

        Args:
            X_data: input image array; channel count is taken from axis 3.
            gt: ground-truth labels/masks forwarded to Dataset.
            mask: evaluation mask forwarded to Dataset.
            k: unused; kept for interface compatibility with callers.

        Returns:
            Predicted probabilities from self.eval, or None when no
            checkpoint could be restored.
        """
        self.dataset = Dataset(X_data, gt, mask, self.flags)
        self.channels = X_data.shape[3]

        self.model = MPCGAN(self.sess, self.flags, self.dataset.image_size,
                            self.channels)

        self.best_auc_sum = 0.
        self._make_folders()

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())

        tf_utils.show_all_variables()

        # Fix: predprob used to be a module-level global assigned only on the
        # success branch, so a failed load raised NameError at the return.
        predprob = None
        if self.load_model():
            print(' [*] Load Success!\n')
            predprob, _ = self.eval(phase='test')
        else:
            print(' [!] Load Failed!\n')

        return predprob
# Exemplo n.º 2  (scraper-artifact separator between unrelated snippets)
# 0
    def discriminator(self, y_data, name='d_', is_reuse=False):
        """Two-layer MLP discriminator.

        Flattens the input, applies one ReLU hidden layer and a linear
        output layer, and returns (sigmoid(logits), logits).
        """
        with tf.variable_scope(name, reuse=is_reuse):
            flat = flatten(y_data)
            hidden = tf_utils.linear(flat, self.num_hiddens, name='fc1')
            hidden = tf.nn.relu(hidden)
            logits = tf_utils.linear(hidden, 1, name='fc2')

        return tf.nn.sigmoid(logits), logits
# Exemplo n.º 3  (scraper-artifact separator between unrelated snippets)
# 0
    def generator(self, x_data, name='g_'):
        """MLP generator: flattened input -> hidden ReLU -> image-sized
        linear layer, passed through self.out_func."""
        with tf.variable_scope(name):
            flat = flatten(x_data)
            hidden = tf_utils.linear(flat, self.num_hiddens, name='fc1')
            hidden = tf.nn.relu(hidden, name='relu1')
            out_dim = self.image_size[0] * self.image_size[1]
            logits = tf_utils.linear(hidden, out_dim, name='fc2')

        return self.out_func(logits)
    def train(self, X_data, gt, mask):
        """Adversarial training loop for the MPCGAN model.

        Alternates discriminator and generator updates in intervals of
        self.flags.train_interval steps, logs losses, evaluates after each
        interval, and checkpoints whenever the AUC sum improves.

        Args:
            X_data: input image array; channel count taken from axis 3.
            gt: ground-truth labels/masks forwarded to Dataset.
            mask: evaluation mask forwarded to Dataset.
        """
        # run_config = tf.ConfigProto()
        # run_config.gpu_options.allow_growth = True
        # self.sess = tf.Session(config=run_config)

        self.dataset = Dataset(X_data, gt, mask, self.flags)
        self.channels = X_data.shape[3]

        self.model = MPCGAN(self.sess, self.flags, self.dataset.image_size,
                            self.channels)

        # Best AUC sum seen so far; drives checkpointing below.
        self.best_auc_sum = 0.
        self._make_folders()

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())

        tf_utils.show_all_variables()

        # Outer loop advances in steps of train_interval iterations.
        for iter_time in range(0, self.flags.iters + 1,
                               self.flags.train_interval):
            self.sample(iter_time)  # sampling images and save them

            # train discrminator
            for iter_ in range(1, self.flags.train_interval + 1):
                x_imgs, y_imgs = self.dataset.train_next_batch(
                    batch_size=self.flags.batch_size)
                d_loss = self.model.train_dis(x_imgs, y_imgs)
                self.print_info(iter_time + iter_, 'd_loss', d_loss)
                self.dloss_save_placeholder = d_loss  # keeps last D loss of the interval

            # train generator
            for iter_ in range(1, self.flags.train_interval + 1):
                x_imgs, y_imgs = self.dataset.train_next_batch(
                    batch_size=self.flags.batch_size)
                g_loss = self.model.train_gen(x_imgs, y_imgs)
                self.print_info(iter_time + iter_, 'g_loss', g_loss)
                self.gloss_save_placeholder = g_loss  # keeps last G loss of the interval

            #write loss to tensorboard
            self.model.measure_loss(self.gloss_save_placeholder,
                                    self.dloss_save_placeholder, iter_time)

            valprob, auc_sum = self.eval(iter_time, phase='train')

            # Save a checkpoint only when validation improves.
            if self.best_auc_sum < auc_sum:
                self.best_auc_sum = auc_sum
                self.save_model(iter_time)
    def __init__(self, flags):
        """Build the TF session, dataset, and CGAN model, then prepare
        output folders, the checkpoint saver, and variable initialization.

        Args:
            flags: parsed configuration object (provides .dataset and
                the hyperparameters consumed by Dataset/CGAN).
        """
        run_config = tf.ConfigProto()
        # Grow GPU memory on demand rather than reserving it all up front.
        run_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=run_config)

        self.flags = flags
        self.dataset = Dataset(self.flags.dataset, self.flags)
        self.model = CGAN(self.sess, self.flags, self.dataset.image_size)

        # Best validation AUC sum seen so far; used for checkpointing.
        self.best_auc_sum = 0.
        self._make_folders()

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())

        tf_utils.show_all_variables()
# Exemplo n.º 6  (scraper-artifact separator between unrelated snippets)
# 0
    def _tensorboard(self):
        """Register TensorBoard summaries for the paired/unpaired setup:
        discriminator-output histograms, the eight adversarial loss scalars,
        and input/generated/reconstruction images.

        Side effect: sets self.summary_op to the merged summary tensor.
        """
        # Discriminator responses on unpaired real vs. generated samples.
        tf.summary.histogram('Dy/real_unpaired', self.Dy_dis(self.y_imgs))
        tf.summary.histogram('Dy/fake_unpaired', self.Dy_dis(self.fake_y_imgs))
        tf.summary.histogram('Dx/real_unpaired', self.Dx_dis(self.x_imgs))
        tf.summary.histogram('Dx/fake_unpaired', self.Dx_dis(self.fake_x_imgs))

        # Discriminator responses on paired (concatenated) samples.
        tf.summary.histogram('Dy/real_paired', self.Dy_dis(self.real_xy_pair))
        tf.summary.histogram('Dy/fake_paired', self.Dy_dis(self.fake_xy_pair))
        tf.summary.histogram('Dx/real_paired', self.Dx_dis(self.real_yx_pair))
        tf.summary.histogram('Dx/fake_paired', self.Dx_dis(self.fake_yx_pair))

        # Generator/discriminator losses, unpaired branch.
        tf.summary.scalar('loss/G_gen_unpaired', self.G_gen_loss_unpair)
        tf.summary.scalar('loss/Dy_dis_unpaired', self.Dy_dis_loss_unpair)
        tf.summary.scalar('loss/F_gen_unpaired', self.F_gen_loss_unpair)
        tf.summary.scalar('loss/Dx_dis_unpaired', self.Dx_dis_loss_unpair)

        # Generator/discriminator losses, paired branch.
        tf.summary.scalar('loss/G_gen_paired', self.G_gen_loss_pair)
        tf.summary.scalar('loss/Dy_dis_paired', self.Dy_dis_loss_pair)
        tf.summary.scalar('loss/F_gen_paired', self.F_gen_loss_pair)
        tf.summary.scalar('loss/Dx_dis_paired', self.Dx_dis_loss_pair)

        # Image summaries: input, cross-domain translation, and cycle
        # reconstruction for both directions (X->Y->X and Y->X->Y).
        tf.summary.image('X/input', tf_utils.batch_convert2int(self.x_imgs))
        tf.summary.image('X/generated_Y',
                         tf_utils.batch_convert2int(self.G_gen(self.x_imgs)))
        tf.summary.image(
            'X/reconstruction',
            tf_utils.batch_convert2int(self.F_gen(self.G_gen(self.x_imgs))))
        tf.summary.image('Y/input', tf_utils.batch_convert2int(self.y_imgs))
        tf.summary.image('Y/generated_X',
                         tf_utils.batch_convert2int(self.F_gen(self.y_imgs)))
        tf.summary.image(
            'Y/reconstruction',
            tf_utils.batch_convert2int(self.G_gen(self.F_gen(self.y_imgs))))

        self.summary_op = tf.summary.merge_all()
# Exemplo n.º 7  (scraper-artifact separator between unrelated snippets)
# 0
    def _tensorboard(self):
        """Register TensorBoard summaries for the CycleGAN setup:
        discriminator-output histograms, the four adversarial losses, and
        translated/reconstructed images.

        Side effect: sets self.summary_op to the merged summary tensor.
        """
        # Discriminator responses on real vs. generated samples.
        tf.summary.histogram('Dy/real', self.Dy_dis(self.y_imgs))
        tf.summary.histogram('Dy/fake', self.Dy_dis(self.G_gen(self.x_imgs)))
        tf.summary.histogram('Dx/real', self.Dx_dis(self.x_imgs))
        tf.summary.histogram('Dx/fake', self.Dx_dis(self.F_gen(self.y_imgs)))

        tf.summary.scalar('loss/G_gen', self.G_gen_loss)
        tf.summary.scalar('loss/Dy_dis', self.Dy_dis_loss)
        tf.summary.scalar('loss/F_gen', self.F_gen_loss)
        tf.summary.scalar('loss/Dx_dis', self.Dx_dis_loss)

        # Cross-domain translations and cycle reconstructions
        # (X->Y->X and Y->X->Y).
        tf.summary.image('X/generated_Y',
                         tf_utils.batch_convert2int(self.G_gen(self.x_imgs)))
        tf.summary.image(
            'X/reconstruction',
            tf_utils.batch_convert2int(self.F_gen(self.G_gen(self.x_imgs))))
        tf.summary.image('Y/generated_X',
                         tf_utils.batch_convert2int(self.F_gen(self.y_imgs)))
        tf.summary.image(
            'Y/reconstruction',
            tf_utils.batch_convert2int(self.G_gen(self.F_gen(self.y_imgs))))

        self.summary_op = tf.summary.merge_all()
    def discriminator_pixel(self, data, name='d_', is_reuse=False):
        """Pixel-level discriminator: three stride-1 3x3 convs widening the
        channels (dis_c -> 2*dis_c -> 4*dis_c), then a 1x1 conv to a
        single-channel logit map.

        Returns:
            (sigmoid(logits), logits), each with the input's spatial shape.
        """
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # conv1: (N, 640, 640, 4) -> (N, 640, 640, 32)
            h = tf_utils.conv2d(data, self.dis_c, k_h=3, k_w=3,
                                d_h=1, d_w=1, name='conv1_conv1')
            h = tf_utils.lrelu(h, name='conv1_lrelu1')

            # conv2: (N, 640, 640, 32) -> (N, 640, 640, 64)
            h = tf_utils.conv2d(h, 2 * self.dis_c, k_h=3, k_w=3,
                                d_h=1, d_w=1, name='conv2_conv1')
            h = tf_utils.lrelu(h)

            # conv3: (N, 640, 640, 64) -> (N, 640, 640, 128)
            h = tf_utils.conv2d(h, 4 * self.dis_c, k_h=3, k_w=3,
                                d_h=1, d_w=1, name='conv3_conv1')
            h = tf_utils.lrelu(h)

            # output layer: (N, 640, 640, 128) -> (N, 640, 640, 1)
            logits = tf_utils.conv2d(h, 1, k_h=1, k_w=1,
                                     d_h=1, d_w=1, name='conv_output')

            return tf.nn.sigmoid(logits), logits
# Exemplo n.º 9  (scraper-artifact separator between unrelated snippets)
# 0
def generator(data, name='g_'):
    """U-Net generator: four conv+maxpool encoder stages, a bottleneck, and
    four upsample+skip-concat decoder stages, ending in a 1x1 sigmoid conv.

    Args:
        data: input tensor, commented shapes assume (N, 640, 640, 1).
        name: variable-scope name for all layers.

    Returns:
        Sigmoid output tensor with the input's spatial size and 1 channel.

    Note: relies on module-level `gen_c` (base channel count) and
    `_gen_train_ops` (batch-norm update-op collection).
    """
    with tf.variable_scope(name):
        # conv1: (N, 640, 640, 1) -> (N, 320, 320, 32)
        conv1 = tf_utils.conv2d(data, gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv1_conv1')
        conv1 = tf_utils.batch_norm(conv1, name='conv1_batch1', _ops=_gen_train_ops)
        conv1 = tf.nn.relu(conv1, name='conv1_relu1')
        conv1 = tf_utils.conv2d(conv1, gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv1_conv2')
        conv1 = tf_utils.batch_norm(conv1, name='conv1_batch2', _ops=_gen_train_ops)
        conv1 = tf.nn.relu(conv1, name='conv1_relu2')
        pool1 = tf_utils.max_pool_2x2(conv1, name='maxpool1')

        # conv2: (N, 320, 320, 32) -> (N, 160, 160, 64)
        conv2 = tf_utils.conv2d(pool1, 2*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv2_conv1')
        conv2 = tf_utils.batch_norm(conv2, name='conv2_batch1', _ops=_gen_train_ops)
        conv2 = tf.nn.relu(conv2, name='conv2_relu1')
        conv2 = tf_utils.conv2d(conv2, 2*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv2_conv2')
        # Fix: was 'conv2-batch2' (hyphen), inconsistent with every other
        # 'convN_batchM' layer name. NOTE: checkpoints saved under the old
        # name will need a variable rename to load.
        conv2 = tf_utils.batch_norm(conv2, name='conv2_batch2', _ops=_gen_train_ops)
        conv2 = tf.nn.relu(conv2, name='conv2_relu2')
        pool2 = tf_utils.max_pool_2x2(conv2, name='maxpool2')

        # conv3: (N, 160, 160, 64) -> (N, 80, 80, 128)
        conv3 = tf_utils.conv2d(pool2, 4*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv3_conv1')
        conv3 = tf_utils.batch_norm(conv3, name='conv3_batch1', _ops=_gen_train_ops)
        conv3 = tf.nn.relu(conv3, name='conv3_relu1')
        conv3 = tf_utils.conv2d(conv3, 4*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv3_conv2')
        conv3 = tf_utils.batch_norm(conv3, name='conv3_batch2', _ops=_gen_train_ops)
        conv3 = tf.nn.relu(conv3, name='conv3_relu2')
        pool3 = tf_utils.max_pool_2x2(conv3, name='maxpool3')

        # conv4: (N, 80, 80, 128) -> (N, 40, 40, 256)
        conv4 = tf_utils.conv2d(pool3, 8*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv4_conv1')
        conv4 = tf_utils.batch_norm(conv4, name='conv4_batch1', _ops=_gen_train_ops)
        conv4 = tf.nn.relu(conv4, name='conv4_relu1')
        conv4 = tf_utils.conv2d(conv4, 8*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv4_conv2')
        conv4 = tf_utils.batch_norm(conv4, name='conv4_batch2', _ops=_gen_train_ops)
        conv4 = tf.nn.relu(conv4, name='conv4_relu2')
        pool4 = tf_utils.max_pool_2x2(conv4, name='maxpool4')

        # conv5 (bottleneck): (N, 40, 40, 256) -> (N, 40, 40, 512)
        conv5 = tf_utils.conv2d(pool4, 16*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv5_conv1')
        conv5 = tf_utils.batch_norm(conv5, name='conv5_batch1', _ops=_gen_train_ops)
        conv5 = tf.nn.relu(conv5, name='conv5_relu1')
        conv5 = tf_utils.conv2d(conv5, 16*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv5_conv2')
        conv5 = tf_utils.batch_norm(conv5, name='conv5_batch2', _ops=_gen_train_ops)
        conv5 = tf.nn.relu(conv5, name='conv5_relu2')

        # conv6: upsample + skip from conv4 -> (N, 80, 80, 256)
        up1 = tf_utils.upsampling2d(conv5, size=(2, 2), name='conv6_up')
        conv6 = tf.concat([up1, conv4], axis=3, name='conv6_concat')
        conv6 = tf_utils.conv2d(conv6, 8*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv6_conv1')
        conv6 = tf_utils.batch_norm(conv6, name='conv6_batch1', _ops=_gen_train_ops)
        conv6 = tf.nn.relu(conv6, name='conv6_relu1')
        conv6 = tf_utils.conv2d(conv6, 8*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv6_conv2')
        conv6 = tf_utils.batch_norm(conv6, name='conv6_batch2', _ops=_gen_train_ops)
        conv6 = tf.nn.relu(conv6, name='conv6_relu2')

        # conv7: upsample + skip from conv3 -> (N, 160, 160, 128)
        up2 = tf_utils.upsampling2d(conv6, size=(2, 2), name='conv7_up')
        conv7 = tf.concat([up2, conv3], axis=3, name='conv7_concat')
        conv7 = tf_utils.conv2d(conv7, 4*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv7_conv1')
        conv7 = tf_utils.batch_norm(conv7, name='conv7_batch1', _ops=_gen_train_ops)
        conv7 = tf.nn.relu(conv7, name='conv7_relu1')
        conv7 = tf_utils.conv2d(conv7, 4*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv7_conv2')
        conv7 = tf_utils.batch_norm(conv7, name='conv7_batch2', _ops=_gen_train_ops)
        conv7 = tf.nn.relu(conv7, name='conv7_relu2')

        # conv8: upsample + skip from conv2 -> (N, 320, 320, 64)
        up3 = tf_utils.upsampling2d(conv7, size=(2, 2), name='conv8_up')
        conv8 = tf.concat([up3, conv2], axis=3, name='conv8_concat')
        conv8 = tf_utils.conv2d(conv8, 2*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv8_conv1')
        conv8 = tf_utils.batch_norm(conv8, name='conv8_batch1', _ops=_gen_train_ops)
        conv8 = tf.nn.relu(conv8, name='conv8_relu1')
        conv8 = tf_utils.conv2d(conv8, 2*gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv8_conv2')
        conv8 = tf_utils.batch_norm(conv8, name='conv8_batch2', _ops=_gen_train_ops)
        conv8 = tf.nn.relu(conv8, name='conv8_relu2')

        # conv9: upsample + skip from conv1 -> (N, 640, 640, 32)
        up4 = tf_utils.upsampling2d(conv8, size=(2, 2), name='conv9_up')
        conv9 = tf.concat([up4, conv1], axis=3, name='conv9_concat')
        conv9 = tf_utils.conv2d(conv9, gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv9_conv1')
        conv9 = tf_utils.batch_norm(conv9, name='conv9_batch1', _ops=_gen_train_ops)
        conv9 = tf.nn.relu(conv9, name='conv9_relu1')
        conv9 = tf_utils.conv2d(conv9, gen_c, k_h=3, k_w=3, d_h=1, d_w=1, name='conv9_conv2')
        conv9 = tf_utils.batch_norm(conv9, name='conv9_batch2', _ops=_gen_train_ops)
        conv9 = tf.nn.relu(conv9, name='conv9_relu2')

        # output layer: (N, 640, 640, 32) -> (N, 640, 640, 1)
        output = tf_utils.conv2d(conv9, 1, k_h=1, k_w=1, d_h=1, d_w=1, name='conv_output')

        return tf.nn.sigmoid(output)
    def discriminator_image(self, data, name='d_', is_reuse=False):
        """Image-level discriminator: five conv stages (stride-2 convs and
        max-pools shrink the spatial size), global average pooling, and a
        linear head producing a single logit per image.

        Args:
            data: input tensor; commented shapes assume (N, 640, 640, 4).
            name: variable-scope name for all layers.
            is_reuse: when True, reuse existing variables in this scope.

        Returns:
            (sigmoid(logit), logit) of shape (N, 1).
        """
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # conv1: (N, 640, 640, 4) -> (N, 160, 160, 32)
            conv1 = tf_utils.conv2d(data,
                                    self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=2,
                                    d_w=2,
                                    name='conv1_conv1')
            conv1 = tf_utils.batch_norm(conv1,
                                        name='conv1_batch1',
                                        _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu1')
            conv1 = tf_utils.conv2d(conv1,
                                    self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv1_conv2')
            conv1 = tf_utils.batch_norm(conv1,
                                        name='conv1_batch2',
                                        _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu2')
            pool1 = tf_utils.max_pool_2x2(conv1, name='maxpool1')

            # conv2: (N, 160, 160, 32) -> (N, 40, 40, 64)
            conv2 = tf_utils.conv2d(pool1,
                                    2 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=2,
                                    d_w=2,
                                    name='conv2_conv1')
            conv2 = tf_utils.batch_norm(conv2,
                                        name='conv2_batch1',
                                        _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu1')
            conv2 = tf_utils.conv2d(conv2,
                                    2 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv2_conv2')
            conv2 = tf_utils.batch_norm(conv2,
                                        name='conv2_batch2',
                                        _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu2')
            pool2 = tf_utils.max_pool_2x2(conv2, name='maxpool2')

            # conv3: (N, 40, 40, 64) -> (N, 20, 20, 128)
            conv3 = tf_utils.conv2d(pool2,
                                    4 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv3_conv1')
            conv3 = tf_utils.batch_norm(conv3,
                                        name='conv3_batch1',
                                        _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu1')
            conv3 = tf_utils.conv2d(conv3,
                                    4 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv3_conv2')
            conv3 = tf_utils.batch_norm(conv3,
                                        name='conv3_batch2',
                                        _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu2')
            pool3 = tf_utils.max_pool_2x2(conv3, name='maxpool3')

            # conv4: (N, 20, 20, 128) -> (N, 10, 10, 256)
            conv4 = tf_utils.conv2d(pool3,
                                    8 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv4_conv1')
            conv4 = tf_utils.batch_norm(conv4,
                                        name='conv4_batch1',
                                        _ops=self._dis_train_ops)
            conv4 = tf.nn.relu(conv4, name='conv4_relu1')
            conv4 = tf_utils.conv2d(conv4,
                                    8 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv4_conv2')
            conv4 = tf_utils.batch_norm(conv4,
                                        name='conv4_batch2',
                                        _ops=self._dis_train_ops)
            conv4 = tf.nn.relu(conv4, name='conv4_relu2')
            pool4 = tf_utils.max_pool_2x2(conv4, name='maxpool4')

            # conv5: (N, 10, 10, 256) -> (N, 10, 10, 512)
            conv5 = tf_utils.conv2d(pool4,
                                    16 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv5_conv1')
            conv5 = tf_utils.batch_norm(conv5,
                                        name='conv5_batch1',
                                        _ops=self._dis_train_ops)
            conv5 = tf.nn.relu(conv5, name='conv5_relu1')
            conv5 = tf_utils.conv2d(conv5,
                                    16 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv5_conv2')
            conv5 = tf_utils.batch_norm(conv5,
                                        name='conv5_batch2',
                                        _ops=self._dis_train_ops)
            conv5 = tf.nn.relu(conv5, name='conv5_relu2')

            # output layer: (N, 10, 10, 512) -> (N, 1, 1, 512) -> (N, 1)
            # Global average pooling over the full spatial extent.
            # NOTE(review): 'global_vaerage_pool' looks like a typo for
            # 'global_average_pool'; left unchanged because renaming the op
            # would invalidate existing checkpoints — confirm before fixing.
            shape = conv5.get_shape().as_list()
            gap = tf.layers.average_pooling2d(inputs=conv5,
                                              pool_size=shape[1],
                                              strides=1,
                                              padding='VALID',
                                              name='global_vaerage_pool')
            gap_flatten = tf.reshape(gap, [-1, 16 * self.dis_c])
            output = tf_utils.linear(gap_flatten, 1, name='linear_output')

            return tf.nn.sigmoid(output), output
    def discriminator_patch1(self, data, name='d_', is_reuse=False):
        """Patch discriminator: same five-stage conv encoder as the image
        discriminator, but the head is a 1x1 conv producing a logit per
        10x10 patch instead of a single per-image logit.

        Args:
            data: input tensor; commented shapes assume (N, 640, 640, 4).
            name: variable-scope name for all layers.
            is_reuse: when True, reuse existing variables in this scope.

        Returns:
            (sigmoid(logits), logits) of shape (N, 10, 10, 1).
        """
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # conv1: (N, 640, 640, 4) -> (N, 160, 160, 32)
            conv1 = tf_utils.conv2d(data,
                                    self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=2,
                                    d_w=2,
                                    name='conv1_conv1')
            conv1 = tf_utils.batch_norm(conv1,
                                        name='conv1_batch1',
                                        _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu1')
            conv1 = tf_utils.conv2d(conv1,
                                    self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv1_conv2')
            conv1 = tf_utils.batch_norm(conv1,
                                        name='conv1_batch2',
                                        _ops=self._dis_train_ops)
            conv1 = tf.nn.relu(conv1, name='conv1_relu2')
            pool1 = tf_utils.max_pool_2x2(conv1, name='maxpool1')

            # conv2: (N, 160, 160, 32) -> (N, 40, 40, 64)
            conv2 = tf_utils.conv2d(pool1,
                                    2 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=2,
                                    d_w=2,
                                    name='conv2_conv1')
            conv2 = tf_utils.batch_norm(conv2,
                                        name='conv2_batch1',
                                        _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu1')
            conv2 = tf_utils.conv2d(conv2,
                                    2 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv2_conv2')
            conv2 = tf_utils.batch_norm(conv2,
                                        name='conv2_batch2',
                                        _ops=self._dis_train_ops)
            conv2 = tf.nn.relu(conv2, name='conv2_relu2')
            pool2 = tf_utils.max_pool_2x2(conv2, name='maxpool2')

            # conv3: (N, 40, 40, 64) -> (N, 20, 20, 128)
            conv3 = tf_utils.conv2d(pool2,
                                    4 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv3_conv1')
            conv3 = tf_utils.batch_norm(conv3,
                                        name='conv3_batch1',
                                        _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu1')
            conv3 = tf_utils.conv2d(conv3,
                                    4 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv3_conv2')
            conv3 = tf_utils.batch_norm(conv3,
                                        name='conv3_batch2',
                                        _ops=self._dis_train_ops)
            conv3 = tf.nn.relu(conv3, name='conv3_relu2')
            pool3 = tf_utils.max_pool_2x2(conv3, name='maxpool3')

            # conv4: (N, 20, 20, 128) -> (N, 10, 10, 256)
            conv4 = tf_utils.conv2d(pool3,
                                    8 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv4_conv1')
            conv4 = tf_utils.batch_norm(conv4,
                                        name='conv4_batch1',
                                        _ops=self._dis_train_ops)
            conv4 = tf.nn.relu(conv4, name='conv4_relu1')
            conv4 = tf_utils.conv2d(conv4,
                                    8 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv4_conv2')
            conv4 = tf_utils.batch_norm(conv4,
                                        name='conv4_batch2',
                                        _ops=self._dis_train_ops)
            conv4 = tf.nn.relu(conv4, name='conv4_relu2')
            pool4 = tf_utils.max_pool_2x2(conv4, name='maxpool4')

            # conv5: (N, 10, 10, 256) -> (N, 10, 10, 512)
            conv5 = tf_utils.conv2d(pool4,
                                    16 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv5_conv1')
            conv5 = tf_utils.batch_norm(conv5,
                                        name='conv5_batch1',
                                        _ops=self._dis_train_ops)
            conv5 = tf.nn.relu(conv5, name='conv5_relu1')
            conv5 = tf_utils.conv2d(conv5,
                                    16 * self.dis_c,
                                    k_h=3,
                                    k_w=3,
                                    d_h=1,
                                    d_w=1,
                                    name='conv5_conv2')
            conv5 = tf_utils.batch_norm(conv5,
                                        name='conv5_batch2',
                                        _ops=self._dis_train_ops)
            conv5 = tf.nn.relu(conv5, name='conv5_relu2')

            # output layer: (N, 10, 10, 512) -> (N, 10, 10, 1)
            # 1x1 conv keeps the patch grid — one logit per patch.
            output = tf_utils.conv2d(conv5,
                                     1,
                                     k_h=1,
                                     k_w=1,
                                     d_h=1,
                                     d_w=1,
                                     name='conv_output')

            return tf.nn.sigmoid(output), output
# Exemplo n.º 12  (scraper-artifact separator between unrelated snippets)
# 0
    def discriminator(self, data, name='d_', is_reuse=False):
        """DCGAN-style discriminator: four conv layers (the last with
        stride 1), then a linear head over the flattened features.

        Returns (sigmoid(logits), logits).
        """
        with tf.variable_scope(name, reuse=is_reuse):
            # 256 -> 128
            h = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h = tf_utils.lrelu(h, name='h0_lrelu')

            # 128 -> 64
            h = tf_utils.conv2d(h, self.dis_c[1], name='h1_conv2d')
            h = tf_utils.batch_norm(h, name='h1_batchnorm', _ops=self._dis_train_ops)
            h = tf_utils.lrelu(h, name='h1_lrelu')

            # 64 -> 32
            h = tf_utils.conv2d(h, self.dis_c[2], name='h2_conv2d')
            h = tf_utils.batch_norm(h, name='h2_batchnorm', _ops=self._dis_train_ops)
            h = tf_utils.lrelu(h, name='h2_lrelu')

            # 32 -> 32 (stride 1 keeps spatial size)
            h = tf_utils.conv2d(h, self.dis_c[3], d_h=1, d_w=1, name='h3_conv2d')
            h = tf_utils.batch_norm(h, name='h3_batchnorm', _ops=self._dis_train_ops)
            h = tf_utils.lrelu(h, name='h3_lrelu')

            # linear head over flattened features
            logits = tf_utils.linear(flatten(h), self.dis_c[4], name='h4_linear')

            return tf.nn.sigmoid(logits), logits
# Exemplo n.º 13  (scraper-artifact separator between unrelated snippets)
# 0
    def __call__(self, x):
        """Two-headed discriminator with a shared middle network.

        The head/tail pair is selected by the channel count of ``x``:
        1 channel routes through the ``<name>_unpair`` scope, 2 channels
        through the ``<name>_pair`` scope.  Both paths run the common
        ``self.share_net`` between their private head conv and tail convs.

        NOTE(review): for any other channel count both branches are
        skipped and this method returns None.
        """
        output = None
        # Static input shape; channel axis (index 3) selects the branch.
        input_shape = x.get_shape().as_list()
        print('input_shape: {}'.format(input_shape))

        if input_shape[3] == 1:  # unpaired image
            with tf.variable_scope(self.name + '_unpair',
                                   reuse=self.reuse_extra_1):
                # Extra-head network
                # stride-1 conv: (N, H, W, 1) -> (N, H, W, 64)
                conv01 = tf_utils.conv2d(x,
                                         self.ndf,
                                         k_h=4,
                                         k_w=4,
                                         d_h=1,
                                         d_w=1,
                                         padding='SAME',
                                         name='conv01_conv_c1')
                conv01 = tf_utils.lrelu(conv01,
                                        name='conv01_lrelu_c1',
                                        is_print=True)

            # Share network (four stride-2 convs; its own reuse flag is
            # managed inside share_net)
            conv4 = self.share_net(conv01, name=self.name)

            with tf.variable_scope(self.name + '_unpair',
                                   reuse=self.reuse_extra_1):
                # Extra-tail network
                # stride-1 conv keeps spatial size:
                # (N, H/16, W/16, 512) -> (N, H/16, W/16, 512)
                conv5 = tf_utils.conv2d(conv4,
                                        8 * self.ndf,
                                        k_h=4,
                                        k_w=4,
                                        d_h=1,
                                        d_w=1,
                                        padding='SAME',
                                        name='conv5_conv_c1')
                conv5 = tf_utils.norm(conv5,
                                      _type='instance',
                                      _ops=self._ops,
                                      name='conv5_norm_c1')
                conv5 = tf_utils.lrelu(conv5,
                                       name='conv5_lrelu_c1',
                                       is_print=True)
                # patch logits: (N, H/16, W/16, 512) -> (N, H/16, W/16, 1)
                conv6 = tf_utils.conv2d(conv5,
                                        1,
                                        k_h=4,
                                        k_w=4,
                                        d_h=1,
                                        d_w=1,
                                        padding='SAME',
                                        name='conv6_conv_c1',
                                        is_print=True)

                if self.use_sigmoid:
                    output = tf_utils.sigmoid(conv6,
                                              name='output_sigmoid_c1',
                                              is_print=True)
                else:
                    output = tf_utils.identity(
                        conv6, name='output_without_sigmoid_c1', is_print=True)

                # set reuse=True for next call
                self.reuse_extra_1 = True

        elif input_shape[3] == 2:  # paired image
            with tf.variable_scope(self.name + '_pair',
                                   reuse=self.reuse_extra_2):
                # Extra-head network
                # stride-1 conv: (N, H, W, 2) -> (N, H, W, 64)
                conv01 = tf_utils.conv2d(x,
                                         self.ndf,
                                         k_h=4,
                                         k_w=4,
                                         d_h=1,
                                         d_w=1,
                                         padding='SAME',
                                         name='conv01_conv_c2')
                conv01 = tf_utils.lrelu(conv01,
                                        name='conv01_lrelu_c2',
                                        is_print=True)

            # Share network (same weights as the unpaired path)
            conv4 = self.share_net(conv01, name=self.name)

            with tf.variable_scope(self.name + '_pair',
                                   reuse=self.reuse_extra_2):
                # Extra-tail network
                # stride-1 conv keeps spatial size:
                # (N, H/16, W/16, 512) -> (N, H/16, W/16, 512)
                conv5 = tf_utils.conv2d(conv4,
                                        8 * self.ndf,
                                        k_h=4,
                                        k_w=4,
                                        d_h=1,
                                        d_w=1,
                                        padding='SAME',
                                        name='conv5_conv_c2')
                conv5 = tf_utils.norm(conv5,
                                      _type='instance',
                                      _ops=self._ops,
                                      name='conv5_norm_c2')
                conv5 = tf_utils.lrelu(conv5,
                                       name='conv5_lrelu_c2',
                                       is_print=True)
                # patch logits: (N, H/16, W/16, 512) -> (N, H/16, W/16, 1)
                conv6 = tf_utils.conv2d(conv5,
                                        1,
                                        k_h=4,
                                        k_w=4,
                                        d_h=1,
                                        d_w=1,
                                        padding='SAME',
                                        name='conv6_conv_c2',
                                        is_print=True)

                if self.use_sigmoid:
                    output = tf_utils.sigmoid(conv6,
                                              name='output_sigmoid_c2',
                                              is_print=True)
                else:
                    output = tf_utils.identity(
                        conv6, name='output_without_sigmoid_c2', is_print=True)

                # set reuse=True for next call
                self.reuse_extra_2 = True

        # Collect trainable variables for the three parameter groups.
        # NOTE(review): tf scope filtering is a prefix match, so
        # scope=self.name likely also captures the '_unpair'/'_pair'
        # variables in share_variables -- verify this is intended.
        self.unpair_variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name + '_unpair')
        self.pair_variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name + '_pair')
        self.share_variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

        return output
Exemplo n.º 14
0
    def generator(self, data, name='g_'):
        """DCGAN-style generator.

        Projects the flattened input to a 4x4 feature map, then applies six
        transposed-conv stages (4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256) and
        squashes the result with tanh into [-1, 1].
        """
        with tf.variable_scope(name):
            # Project and reshape to a 4x4 spatial grid.
            net = tf_utils.linear(flatten(data), 4 * 4 * self.gen_c[0],
                                  name='h0_linear')
            net = tf.reshape(net, [tf.shape(net)[0], 4, 4, self.gen_c[0]])
            net = tf_utils.batch_norm(net, name='h0_batchnorm',
                                      _ops=self._gen_train_ops)
            net = tf.nn.relu(net, name='h0_relu')

            # Five identical deconv + batch-norm + ReLU upsampling stages.
            for i in range(1, 6):
                net = tf_utils.deconv2d(net, self.gen_c[i],
                                        name='h{}_deconv2d'.format(i))
                net = tf_utils.batch_norm(net, name='h{}_batchnorm'.format(i),
                                          _ops=self._gen_train_ops)
                net = tf.nn.relu(net, name='h{}_relu'.format(i))

            # Final deconv to full resolution, tanh output in [-1, 1].
            return tf.nn.tanh(tf_utils.deconv2d(net, self.gen_c[6],
                                                name='h6_deconv2d'))
Exemplo n.º 15
0
    def discriminator(self, data, name='d_', is_reuse=False):
        """DCGAN-style discriminator.

        Six stride-2 conv stages (the first without batch norm, per DCGAN
        convention) reduce the spatial size 256 -> 4, then a linear layer
        produces the logits.

        Args:
            data: input image batch; the stage comments assume 256x256
                inputs -- TODO confirm against callers.
            name: variable scope holding the discriminator weights.
            is_reuse: when truthy, reuse existing variables in `name`.

        Returns:
            Tuple of (sigmoid probabilities, raw logits).
        """
        with tf.variable_scope(name) as scope:
            # Fix: truth-test the flag instead of the fragile
            # `is_reuse is True` identity comparison (PEP 8), which would
            # silently skip reuse for truthy non-bool values.
            if is_reuse:
                scope.reuse_variables()

            # 256 -> 128; no batch norm on the first layer.
            net = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            net = tf_utils.lrelu(net, name='h0_lrelu')

            # Five conv + batch-norm + leaky-ReLU stages:
            # 128 -> 64 -> 32 -> 16 -> 8 -> 4.
            for i in range(1, 6):
                net = tf_utils.conv2d(net, self.dis_c[i],
                                      name='h{}_conv2d'.format(i))
                net = tf_utils.batch_norm(net, name='h{}_batchnorm'.format(i),
                                          _ops=self._dis_train_ops)
                net = tf_utils.lrelu(net, name='h{}_lrelu'.format(i))

            # Flatten and project to logits.
            h6_linear = tf_utils.linear(flatten(net), self.dis_c[6],
                                        name='h6_linear')

            return tf.nn.sigmoid(h6_linear), h6_linear
Exemplo n.º 16
0
    def generator(self, data, name='g_'):
        """U-Net (pix2pix-style) generator for 256x256 images.

        Eight stride-2 conv encoder stages (256 -> 1) followed by eight
        deconv decoder stages (1 -> 256) with skip connections; the first
        three decoder stages apply dropout.  Output is tanh in [-1, 1].

        NOTE(review): the skip connections concatenate the *pre-activation*
        tensors (e6_batchnorm ... e1_batchnorm, and raw e0_conv2d), not the
        lrelu outputs -- confirm this is intended.
        NOTE(review): dropout (keep_prob=0.5) is applied unconditionally,
        i.e. also at inference time -- confirm callers expect this.
        """
        with tf.variable_scope(name):
            # --- Encoder ---
            # 256 -> 128 (no batch norm on the first layer)
            e0_conv2d = tf_utils.conv2d(data, self.gen_c[0], name='e0_conv2d')
            e0_lrelu = tf_utils.lrelu(e0_conv2d, name='e0_lrelu')

            # 128 -> 64
            e1_conv2d = tf_utils.conv2d(e0_lrelu, self.gen_c[1], name='e1_conv2d')
            e1_batchnorm = tf_utils.batch_norm(e1_conv2d, name='e1_batchnorm', _ops=self._gen_train_ops)
            e1_lrelu = tf_utils.lrelu(e1_batchnorm, name='e1_lrelu')

            # 64 -> 32
            e2_conv2d = tf_utils.conv2d(e1_lrelu, self.gen_c[2], name='e2_conv2d')
            e2_batchnorm = tf_utils.batch_norm(e2_conv2d, name='e2_batchnorm', _ops=self._gen_train_ops)
            e2_lrelu = tf_utils.lrelu(e2_batchnorm, name='e2_lrelu')

            # 32 -> 16
            e3_conv2d = tf_utils.conv2d(e2_lrelu, self.gen_c[3], name='e3_conv2d')
            e3_batchnorm = tf_utils.batch_norm(e3_conv2d, name='e3_batchnorm', _ops=self._gen_train_ops)
            e3_lrelu = tf_utils.lrelu(e3_batchnorm, name='e3_lrelu')

            # 16 -> 8
            e4_conv2d = tf_utils.conv2d(e3_lrelu, self.gen_c[4], name='e4_conv2d')
            e4_batchnorm = tf_utils.batch_norm(e4_conv2d, name='e4_batchnorm', _ops=self._gen_train_ops)
            e4_lrelu = tf_utils.lrelu(e4_batchnorm, name='e4_lrelu')

            # 8 -> 4
            e5_conv2d = tf_utils.conv2d(e4_lrelu, self.gen_c[5], name='e5_conv2d')
            e5_batchnorm = tf_utils.batch_norm(e5_conv2d, name='e5_batchnorm', _ops=self._gen_train_ops)
            e5_lrelu = tf_utils.lrelu(e5_batchnorm, name='e5_lrelu')

            # 4 -> 2
            e6_conv2d = tf_utils.conv2d(e5_lrelu, self.gen_c[6], name='e6_conv2d')
            e6_batchnorm = tf_utils.batch_norm(e6_conv2d, name='e6_batchnorm', _ops=self._gen_train_ops)
            e6_lrelu = tf_utils.lrelu(e6_batchnorm, name='e6_lrelu')

            # 2 -> 1 (bottleneck; note ReLU here, not leaky ReLU)
            e7_conv2d = tf_utils.conv2d(e6_lrelu, self.gen_c[7], name='e7_conv2d')
            e7_batchnorm = tf_utils.batch_norm(e7_conv2d, name='e7_batchnorm', _ops=self._gen_train_ops)
            e7_relu = tf.nn.relu(e7_batchnorm, name='e7_relu')

            # --- Decoder (deconv + batch norm [+ dropout] + skip + ReLU) ---
            # 1 -> 2; dropout, skip from e6
            d0_deconv = tf_utils.deconv2d(e7_relu, self.gen_c[8], name='d0_deconv2d')
            d0_batchnorm = tf_utils.batch_norm(d0_deconv, name='d0_batchnorm', _ops=self._gen_train_ops)
            d0_drop = tf.nn.dropout(d0_batchnorm, keep_prob=0.5, name='d0_dropout')
            d0_concat = tf.concat([d0_drop, e6_batchnorm], axis=3, name='d0_concat')
            d0_relu = tf.nn.relu(d0_concat, name='d0_relu')

            # 2 -> 4; dropout, skip from e5
            d1_deconv = tf_utils.deconv2d(d0_relu, self.gen_c[9], name='d1_deconv2d')
            d1_batchnorm = tf_utils.batch_norm(d1_deconv, name='d1_batchnorm', _ops=self._gen_train_ops)
            d1_drop = tf.nn.dropout(d1_batchnorm, keep_prob=0.5, name='d1_dropout')
            d1_concat = tf.concat([d1_drop, e5_batchnorm], axis=3, name='d1_concat')
            d1_relu = tf.nn.relu(d1_concat, name='d1_relu')

            # 4 -> 8; dropout, skip from e4
            d2_deconv = tf_utils.deconv2d(d1_relu, self.gen_c[10], name='d2_deconv2d')
            d2_batchnorm = tf_utils.batch_norm(d2_deconv, name='d2_batchnorm', _ops=self._gen_train_ops)
            d2_drop = tf.nn.dropout(d2_batchnorm, keep_prob=0.5, name='d2_dropout')
            d2_concat = tf.concat([d2_drop, e4_batchnorm], axis=3, name='d2_concat')
            d2_relu = tf.nn.relu(d2_concat, name='d2_relu')

            # 8 -> 16; no dropout from here on, skip from e3
            d3_deconv = tf_utils.deconv2d(d2_relu, self.gen_c[11], name='d3_deconv2d')
            d3_batchnorm = tf_utils.batch_norm(d3_deconv, name='d3_batchnorm', _ops=self._gen_train_ops)
            d3_concat = tf.concat([d3_batchnorm, e3_batchnorm], axis=3, name='d3_concat')
            d3_relu = tf.nn.relu(d3_concat, name='d3_relu')

            # 16 -> 32; skip from e2
            d4_deconv = tf_utils.deconv2d(d3_relu, self.gen_c[12], name='d4_deconv2d')
            d4_batchnorm = tf_utils.batch_norm(d4_deconv, name='d4_batchnorm', _ops=self._gen_train_ops)
            d4_concat = tf.concat([d4_batchnorm, e2_batchnorm], axis=3, name='d4_concat')
            d4_relu = tf.nn.relu(d4_concat, name='d4_relu')

            # 32 -> 64; skip from e1
            d5_deconv = tf_utils.deconv2d(d4_relu, self.gen_c[13], name='d5_deconv2d')
            d5_batchnorm = tf_utils.batch_norm(d5_deconv, name='d5_batchnorm', _ops=self._gen_train_ops)
            d5_concat = tf.concat([d5_batchnorm, e1_batchnorm], axis=3, name='d5_concat')
            d5_relu = tf.nn.relu(d5_concat, name='d5_relu')

            # 64 -> 128; skip from the raw first conv (e0_conv2d)
            d6_deconv = tf_utils.deconv2d(d5_relu, self.gen_c[14], name='d6_deconv2d')
            d6_batchnorm = tf_utils.batch_norm(d6_deconv, name='d6_batchnorm', _ops=self._gen_train_ops)
            d6_concat = tf.concat([d6_batchnorm, e0_conv2d], axis=3, name='d6_concat')
            d6_relu = tf.nn.relu(d6_concat, name='d6_relu')

            # 128 -> 256; final deconv, tanh output in [-1, 1]
            d7_deconv = tf_utils.deconv2d(d6_relu, self.gen_c[15], name='d7_deconv2d')

            return tf.nn.tanh(d7_deconv)
Exemplo n.º 17
0
    def __call__(self, x):
        """ResNet-style image-to-image generator (CycleGAN architecture).

        Layout: reflect-padded 7x7 stride-1 conv, two stride-2 downsampling
        convs, a residual trunk (6 blocks for inputs up to 128x128, else 9),
        two upsampling deconvs, and a reflect-padded 7x7 conv back to the
        image channel count with a tanh output in [-1, 1].
        """
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # c7s1 head: reflect-pad 3, 7x7 stride-1 conv -> (N, H, W, ngf)
            net = tf_utils.padding2d(x, p_h=3, p_w=3, pad_type='REFLECT',
                                     name='conv1_padding')
            net = tf_utils.conv2d(net, self.ngf, k_h=7, k_w=7, d_h=1, d_w=1,
                                  padding='VALID', name='conv1_conv')
            net = tf_utils.norm(net, _type='instance', _ops=self._ops,
                                name='conv1_norm')
            net = tf_utils.relu(net, name='conv1_relu', is_print=True)

            # Two stride-2 downsampling stages: ngf -> 2*ngf -> 4*ngf.
            for factor, tag in ((2, 'conv2'), (4, 'conv3')):
                net = tf_utils.conv2d(net, factor * self.ngf, k_h=3, k_w=3,
                                      d_h=2, d_w=2, padding='SAME',
                                      name=tag + '_conv')
                net = tf_utils.norm(net, _type='instance', _ops=self._ops,
                                    name=tag + '_norm')
                net = tf_utils.relu(net, name=tag + '_relu', is_print=True)

            # Residual trunk; depth depends on input resolution.
            is_small = (self.image_size[0] <= 128) and (self.image_size[1] <= 128)
            net = tf_utils.n_res_blocks(net,
                                        num_blocks=6 if is_small else 9,
                                        is_print=True)

            # Two upsampling stages back to full resolution: 2*ngf then ngf.
            net = tf_utils.deconv2d(net, 2 * self.ngf, name='conv4_deconv2d')
            net = tf_utils.norm(net, _type='instance', _ops=self._ops,
                                name='conv4_norm')
            net = tf_utils.relu(net, name='conv4_relu', is_print=True)

            net = tf_utils.deconv2d(net, self.ngf, name='conv5_deconv2d')
            net = tf_utils.norm(net, _type='instance', _ops=self._ops,
                                name='conv5_norm')
            net = tf_utils.relu(net, name='conv5_relu', is_print=True)

            # c7s1 tail back to image channels; tanh output in [-1, 1].
            net = tf_utils.padding2d(net, p_h=3, p_w=3, pad_type='REFLECT',
                                     name='output_padding')
            net = tf_utils.conv2d(net, self.image_size[2], k_h=7, k_w=7,
                                  d_h=1, d_w=1, padding='VALID',
                                  name='output_conv')
            output = tf_utils.tanh(net, name='output_tanh', is_print=True)

            # Reuse variables on subsequent calls; remember trainable vars.
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
Exemplo n.º 18
0
    def __call__(self, x):
        """PatchGAN discriminator.

        Four stride-2 4x4 convs (the first without normalization) followed
        by a stride-1 conv to one channel, producing a grid of per-patch
        logits; a sigmoid is applied only when ``self.use_sigmoid`` is set.
        """
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (N, H, W, C) -> (N, H/2, W/2, ndf); no norm on the first layer.
            net = tf_utils.conv2d(x, self.ndf, k_h=4, k_w=4, d_h=2, d_w=2,
                                  padding='SAME', name='conv1_conv')
            net = tf_utils.lrelu(net, name='conv1_lrelu', is_print=True)

            # Three more stride-2 stages with instance norm:
            # 2*ndf -> 4*ndf -> 8*ndf, halving H and W each time.
            for idx, factor in ((2, 2), (3, 4), (4, 8)):
                tag = 'conv{}'.format(idx)
                net = tf_utils.conv2d(net, factor * self.ndf, k_h=4, k_w=4,
                                      d_h=2, d_w=2, padding='SAME',
                                      name=tag + '_conv')
                net = tf_utils.norm(net, _type='instance', _ops=self._ops,
                                    name=tag + '_norm')
                net = tf_utils.lrelu(net, name=tag + '_lrelu', is_print=True)

            # Stride-1 conv to a single channel of patch logits:
            # (N, H/16, W/16, 512) -> (N, H/16, W/16, 1).
            logits = tf_utils.conv2d(net, 1, k_h=4, k_w=4, d_h=1, d_w=1,
                                     padding='SAME', name='conv5_conv',
                                     is_print=True)

            if self.use_sigmoid:
                output = tf_utils.sigmoid(logits,
                                          name='output_sigmoid',
                                          is_print=True)
            else:
                output = tf_utils.identity(logits,
                                           name='output_without_sigmoid',
                                           is_print=True)

            # Reuse variables on subsequent calls; remember trainable vars.
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
Exemplo n.º 19
0
    def share_net(self, x, name=''):
        """Shared discriminator trunk.

        Four stride-2 4x4 conv stages (the first without normalization)
        reduce (N, H, W, C) to the (N, H/16, W/16, 8*ndf) feature map that
        the per-branch tail networks consume.  Sets ``self.reuse`` so the
        trunk's variables are shared on subsequent calls.
        """
        with tf.variable_scope(name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (N, H, W, C) -> (N, H/2, W/2, ndf); no norm on the first layer.
            net = tf_utils.conv2d(x, self.ndf, k_h=4, k_w=4, d_h=2, d_w=2,
                                  padding='SAME', name='conv1_conv')
            net = tf_utils.lrelu(net, name='conv1_lrelu', is_print=True)

            # Three more stride-2 stages with instance norm:
            # 2*ndf -> 4*ndf -> 8*ndf, halving H and W each time.
            for idx, factor in ((2, 2), (3, 4), (4, 8)):
                tag = 'conv{}'.format(idx)
                net = tf_utils.conv2d(net, factor * self.ndf, k_h=4, k_w=4,
                                      d_h=2, d_w=2, padding='SAME',
                                      name=tag + '_conv')
                net = tf_utils.norm(net, _type='instance', _ops=self._ops,
                                    name=tag + '_norm')
                net = tf_utils.lrelu(net, name=tag + '_lrelu', is_print=True)

            # Reuse variables on the next call.
            self.reuse = True

            return net
Exemplo n.º 20
0
    def discriminator(self, data, name='d_', is_reuse=False):
        """PatchGAN discriminator head.

        Four stride-2 conv stages (the first without batch norm) reduce
        256 -> 16, then a stride-1 3x3 conv keeps the 16x16 grid and emits
        per-patch logits.

        Args:
            data: input image batch; the stage comments assume 256x256
                inputs -- TODO confirm against callers.
            name: variable scope holding the discriminator weights.
            is_reuse: when truthy, reuse existing variables in `name`.

        Returns:
            Tuple of (sigmoid probabilities, raw patch logits).
        """
        with tf.variable_scope(name) as scope:
            # Fix: truth-test the flag instead of the fragile
            # `is_reuse is True` identity comparison (PEP 8), which would
            # silently skip reuse for truthy non-bool values.
            if is_reuse:
                scope.reuse_variables()

            # 256 -> 128; no batch norm on the first layer.
            net = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            net = tf_utils.lrelu(net, name='h0_lrelu')

            # Three conv + batch-norm + leaky-ReLU stages:
            # 128 -> 64 -> 32 -> 16.
            for i in range(1, 4):
                net = tf_utils.conv2d(net, self.dis_c[i],
                                      name='h{}_conv2d'.format(i))
                net = tf_utils.batch_norm(net, name='h{}_batchnorm'.format(i),
                                          _ops=self._dis_train_ops)
                net = tf_utils.lrelu(net, name='h{}_lrelu'.format(i))

            # Patch GAN: stride-1 3x3 conv keeps the 16x16 grid of logits.
            h4_conv2d = tf_utils.conv2d(net,
                                        self.dis_c[4],
                                        k_h=3,
                                        k_w=3,
                                        d_h=1,
                                        d_w=1,
                                        name='h4_conv2d')

            return tf.nn.sigmoid(h4_conv2d), h4_conv2d