import tensorflow as tf

# `layers` (general_conv2d / general_deconv2d helpers) and the values reached
# through `model` (ndf, ngf, BATCH_SIZE, IMG_CHANNELS) come from the
# surrounding repository modules and are assumed to be importable here.


def discriminator(inputdisc, name="discriminator"):
    # PatchGAN-style discriminator built from explicitly padded VALID
    # convolutions; the final layer outputs raw per-patch logits.
    with tf.variable_scope(name):
        f = 4
        padw = 2

        pad_input = tf.pad(inputdisc, [[0, 0], [padw, padw], [padw, padw],
                                       [0, 0]], "CONSTANT")
        o_c1 = layers.general_conv2d(pad_input, model.ndf, f, f, 2, 2, 0.02,
                                     "VALID", "c1", do_norm=False,
                                     relufactor=0.2)
        pad_o_c1 = tf.pad(o_c1, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c2 = layers.general_conv2d(pad_o_c1, model.ndf * 2, f, f, 2, 2, 0.02,
                                     "VALID", "c2", relufactor=0.2)
        pad_o_c2 = tf.pad(o_c2, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c3 = layers.general_conv2d(pad_o_c2, model.ndf * 4, f, f, 2, 2, 0.02,
                                     "VALID", "c3", relufactor=0.2)
        pad_o_c3 = tf.pad(o_c3, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c4 = layers.general_conv2d(pad_o_c3, model.ndf * 8, f, f, 1, 1, 0.02,
                                     "VALID", "c4", relufactor=0.2)
        pad_o_c4 = tf.pad(o_c4, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c5 = layers.general_conv2d(pad_o_c4, 1, f, f, 1, 1, 0.02, "VALID",
                                     "c5", do_norm=False, do_relu=False)

        return o_c5
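# Hedged usage sketch (not part of the original file): wires the discriminator
# onto a 256x256x3 placeholder, matching the image size the generator below
# assumes. The helper name and the placeholder shape are illustrative
# assumptions, not repository API.
def _example_discriminator_usage():
    disc_input = tf.placeholder(tf.float32, [1, 256, 256, 3], name="input_A")
    # Each spatial position of the returned tensor scores one patch of the
    # input; no sigmoid is applied, since c5 disables do_norm and do_relu.
    patch_logits = discriminator(disc_input, name="d_A")
    return patch_logits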
def patch_discriminator(inputdisc, name="discriminator"):
    # Discriminator variant that scores a single randomly cropped 70x70 patch
    # of the input instead of the full image.
    with tf.variable_scope(name):
        f = 4

        patch_input = tf.random_crop(inputdisc, [1, 70, 70, 3])
        o_c1 = layers.general_conv2d(patch_input, model.ndf, f, f, 2, 2, 0.02,
                                     "SAME", "c1", do_norm=False,
                                     relufactor=0.2)
        o_c2 = layers.general_conv2d(o_c1, model.ndf * 2, f, f, 2, 2, 0.02,
                                     "SAME", "c2", relufactor=0.2)
        o_c3 = layers.general_conv2d(o_c2, model.ndf * 4, f, f, 2, 2, 0.02,
                                     "SAME", "c3", relufactor=0.2)
        o_c4 = layers.general_conv2d(o_c3, model.ndf * 8, f, f, 2, 2, 0.02,
                                     "SAME", "c4", relufactor=0.2)
        o_c5 = layers.general_conv2d(o_c4, 1, f, f, 1, 1, 0.02, "SAME", "c5",
                                     do_norm=False, do_relu=False)

        return o_c5
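# Hedged sketch (illustrative names, not repository API): a CycleGAN-style
# setup applies each discriminator to both a real and a generated image with
# shared weights, which in TF1 is done by reusing the enclosing variable scope.
def _example_patch_discriminator_reuse():
    real_b = tf.placeholder(tf.float32, [1, 256, 256, 3], name="real_B")
    fake_b = tf.placeholder(tf.float32, [1, 256, 256, 3], name="fake_B")
    with tf.variable_scope("Model") as scope:
        prob_real_is_real = patch_discriminator(real_b, "d_B")
        scope.reuse_variables()  # second call shares the same weights
        prob_fake_is_real = patch_discriminator(fake_b, "d_B")
    return prob_real_is_real, prob_fake_is_real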
def discriminator_tf(inputdisc, name="discriminator"):
    with tf.variable_scope(name):
        f = 4

        o_c1 = layers.general_conv2d(inputdisc, model.ndf, f, f, 2, 2, 0.02,
                                     "SAME", "c1", do_norm=False,
                                     relufactor=0.2)
        o_c2 = layers.general_conv2d(o_c1, model.ndf * 2, f, f, 2, 2, 0.02,
                                     "SAME", "c2", relufactor=0.2)
        o_c3 = layers.general_conv2d(o_c2, model.ndf * 4, f, f, 2, 2, 0.02,
                                     "SAME", "c3", relufactor=0.2)
        o_c4 = layers.general_conv2d(o_c3, model.ndf * 8, f, f, 1, 1, 0.02,
                                     "SAME", "c4", relufactor=0.2)
        o_c5 = layers.general_conv2d(o_c4, 1, f, f, 1, 1, 0.02, "SAME", "c5",
                                     do_norm=False, do_relu=False)

        return o_c5
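# Hedged sketch of a least-squares GAN loss over the patch logits returned by
# the discriminators above; since c5 applies neither a norm nor an activation,
# the outputs are raw scores that can be regressed toward 1 (real) or 0 (fake).
# The argument names are placeholders, not names defined in this file.
def _example_lsgan_d_loss(prob_real_is_real, prob_fake_is_real):
    return (tf.reduce_mean(tf.squared_difference(prob_real_is_real, 1)) +
            tf.reduce_mean(tf.squared_difference(prob_fake_is_real, 0))) * 0.5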
def build_resnet_block(inputres, dim, name="resnet", padding="REFLECT"):
    """Build a single residual (resnet) block.

    :param inputres: input tensor of the block
    :param dim: number of filters in each of the two convolutions
    :param name: variable scope name of the block
    :param padding: for the tensorflow version use REFLECT; for the pytorch
        version use CONSTANT
    :return: output tensor of the block (the residual addition requires dim to
        match the channel count of inputres).
    """
    with tf.variable_scope(name):
        out_res = tf.pad(inputres, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        out_res = layers.general_conv2d(
            out_res, dim, 3, 3, 1, 1, 0.02, "VALID", "c1")
        out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        out_res = layers.general_conv2d(
            out_res, dim, 3, 3, 1, 1, 0.02, "VALID", "c2", do_relu=False)

        return tf.nn.relu(out_res + inputres)
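# Hedged sketch (the helper name is hypothetical): the 1-pixel pad followed by
# a 3x3 VALID convolution leaves the spatial size unchanged, so the block can
# be applied directly to the ngf*4 feature maps produced by the generator's
# encoder below.
def _example_resnet_block_usage(features):
    # `features` is expected to be shaped [batch, h, w, model.ngf * 4].
    return build_resnet_block(features, model.ngf * 4, name="r_example",
                              padding="REFLECT")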
def build_generator_resnet_9blocks(inputgen, name="generator", skip=False):
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"

        # Encoder: 7x7 stem followed by two stride-2 downsampling convolutions.
        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        o_c1 = layers.general_conv2d(
            pad_input, model.ngf, f, f, 1, 1, 0.02, name="c1")
        o_c2 = layers.general_conv2d(
            o_c1, model.ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c2")
        o_c3 = layers.general_conv2d(
            o_c2, model.ngf * 4, ks, ks, 2, 2, 0.02, "SAME", "c3")

        # Transformation: nine residual blocks at the bottleneck resolution.
        o_r1 = model.build_resnet_block(o_c3, model.ngf * 4, "r1", padding)
        o_r2 = model.build_resnet_block(o_r1, model.ngf * 4, "r2", padding)
        o_r3 = model.build_resnet_block(o_r2, model.ngf * 4, "r3", padding)
        o_r4 = model.build_resnet_block(o_r3, model.ngf * 4, "r4", padding)
        o_r5 = model.build_resnet_block(o_r4, model.ngf * 4, "r5", padding)
        o_r6 = model.build_resnet_block(o_r5, model.ngf * 4, "r6", padding)
        o_r7 = model.build_resnet_block(o_r6, model.ngf * 4, "r7", padding)
        o_r8 = model.build_resnet_block(o_r7, model.ngf * 4, "r8", padding)
        o_r9 = model.build_resnet_block(o_r8, model.ngf * 4, "r9", padding)

        # Decoder: two stride-2 deconvolutions back to 256x256, then a 7x7
        # convolution down to the output image channels.
        o_c4 = layers.general_deconv2d(
            o_r9, [model.BATCH_SIZE, 128, 128, model.ngf * 2], model.ngf * 2,
            ks, ks, 2, 2, 0.02, "SAME", "c4")
        o_c5 = layers.general_deconv2d(
            o_c4, [model.BATCH_SIZE, 256, 256, model.ngf], model.ngf,
            ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c6 = layers.general_conv2d(o_c5, model.IMG_CHANNELS, f, f, 1, 1,
                                     0.02, "SAME", "c6", do_norm=False,
                                     do_relu=False)

        # Optional skip connection from the input image before the final tanh.
        if skip is True:
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
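# Hedged wiring sketch (not from the original file): a CycleGAN-style forward
# pass that maps domain A to B and back, reusing each generator's variables on
# the second pass. The scope name "Model", the generator names "g_A"/"g_B",
# and the input tensors are illustrative assumptions.
def _example_cycle_forward(input_a, input_b):
    with tf.variable_scope("Model") as scope:
        fake_b = build_generator_resnet_9blocks(input_a, name="g_A")
        fake_a = build_generator_resnet_9blocks(input_b, name="g_B")
        scope.reuse_variables()
        cycle_a = build_generator_resnet_9blocks(fake_b, name="g_B")
        cycle_b = build_generator_resnet_9blocks(fake_a, name="g_A")
    return fake_a, fake_b, cycle_a, cycle_b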