Example #1
def generator(num_channels=1, num_timesteps=8, num_preproc=3):
    initial_state = Input(shape=(None, None, 256))
    noise_in_update = Input(shape=(num_timesteps, None, None, 8),
                            name="noise_in_update")
    lores_in = Input(shape=(num_timesteps, None, None, num_channels),
                     name="cond_in")
    inputs = [lores_in, initial_state, noise_in_update]

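    # Embed the conditioning frames, inject per-timestep noise and preprocess with residual blocks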
    xt = TimeDistributed(ReflectionPadding2D(padding=(1, 1)))(lores_in)
    xt = TimeDistributed(
        Conv2D(256 - noise_in_update.shape[-1], kernel_size=(3, 3)))(xt)
    xt = Concatenate()([xt, noise_in_update])
    for i in range(num_preproc):
        xt = res_block(256, time_dist=True, activation='relu')(xt)

    def gen_gate(activation='sigmoid'):
        def gate(x):
            x = ReflectionPadding2D(padding=(1, 1))(x)
            x = Conv2D(256, kernel_size=(3, 3))(x)
            if activation is not None:
                x = Activation(activation)(x)
            return x

        return Lambda(gate)

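    # Convolutional GRU over the preprocessed sequence, seeded with the externally supplied initial state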
    x = CustomGateGRU(update_gate=gen_gate(),
                      reset_gate=gen_gate(),
                      output_gate=gen_gate(activation=None),
                      return_sequences=True,
                      time_steps=num_timesteps)([xt, initial_state])

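    # Keep the last hidden state so the caller can continue the sequence in a later call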
    h = x[:, -1, ...]

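    # Decoder: four 2x upsamplings between the residual blocks give a 16x increase in resolution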
    block_channels = [256, 256, 128, 64, 32]
    for (i, channels) in enumerate(block_channels):
        if i > 0:
            x = TimeDistributed(UpSampling2D(interpolation='bilinear'))(x)
        x = res_block(channels, time_dist=True, activation='leakyrelu')(x)

    x = TimeDistributed(ReflectionPadding2D(padding=(1, 1)))(x)
    img_out = TimeDistributed(
        Conv2D(num_channels, kernel_size=(3, 3), activation='sigmoid'))(x)

    model = Model(inputs=inputs, outputs=[img_out, h])

    def noise_shapes(img_shape=(128, 128)):
        noise_shape_update = (num_timesteps, img_shape[0] // 16,
                              img_shape[1] // 16, 8)
        return [noise_shape_update]

    return (model, noise_shapes)
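
A minimal usage sketch (assuming the helper layers referenced above, such as res_block, ReflectionPadding2D and CustomGateGRU, are importable from the same module; the 128x128 output size is an illustrative choice, not taken from the source):

gen, noise_shapes = generator(num_channels=1, num_timesteps=8)
(noise_shape_update,) = noise_shapes(img_shape=(128, 128))
print(noise_shape_update)  # (8, 8, 8, 8): one 8-channel noise field per timestep at 1/16 of the output resolution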
Example #2
    def generator(self, reuse=False):
        """Returns model generator, which is a DeConvNet.
        Assumed properties:
            gen_input - a scalar
            batch_size
            dimensions of filters and other hyperparameters.
            ...
        """
        with tf.variable_scope("conv1"):
            if self.test_images is not None:
                h = conv_block(self.test_images, relu=True, reuse=reuse)
            else:
                # noise = tf.random_normal(self.g_images.get_shape(), stddev=.03 * 255)
                h = conv_block(self.g_images, relu=True, reuse=reuse)

        for i in range(1, 16):
            with tf.variable_scope("res" + str(i)):
                h = res_block(h, self.is_training, reuse=reuse)

        with tf.variable_scope("deconv1"):
            h = deconv_block(h)

        with tf.variable_scope("deconv2"):
            h = deconv_block(h)

        with tf.variable_scope("conv2"):
            h = conv_block(h, output_channels=3, reuse=reuse)

        return h
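
The helpers conv_block, res_block and deconv_block are not shown here; a rough sketch of what the two simpler ones might look like, with assumed kernel sizes and default channel widths (TF1-style API):

import tensorflow as tf  # TF1.x assumed (tf.layers / tf.variable_scope)

def conv_block(x, output_channels=64, relu=False, reuse=False):
    # 3x3 convolution with an optional ReLU; kernel size and default width are assumptions
    h = tf.layers.conv2d(x, output_channels, 3, padding="same",
                         name="conv", reuse=reuse)
    return tf.nn.relu(h) if relu else h

def deconv_block(x, output_channels=64):
    # transposed convolution that doubles the spatial resolution
    return tf.layers.conv2d_transpose(x, output_channels, 3, strides=2,
                                      padding="same", name="deconv")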
Example #3
def initial_state_model(num_preproc=3):
    initial_frame_in = Input(shape=(None, None, 1))
    noise_in_initial = Input(shape=(None, None, 8), name="noise_in_initial")

    h = ReflectionPadding2D(padding=(1, 1))(initial_frame_in)
    h = Conv2D(256 - noise_in_initial.shape[-1], kernel_size=(3, 3))(h)
    h = Concatenate()([h, noise_in_initial])
    for i in range(num_preproc):
        h = res_block(256, activation='relu')(h)

    return Model(inputs=[initial_frame_in, noise_in_initial], outputs=h)
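
A minimal sketch of wiring this together with the generator from Example #1 (assuming both functions live in the same module; the batch size of 4 and the 8x8 conditioning resolution are illustrative assumptions):

import numpy as np

init_model = initial_state_model()
gen, noise_shapes = generator(num_channels=1, num_timesteps=8)

first_frame = np.zeros((4, 8, 8, 1), dtype="float32")            # last observed low-resolution frame
init_noise = np.random.randn(4, 8, 8, 8).astype("float32")
state = init_model.predict([first_frame, init_noise])            # (4, 8, 8, 256)

(noise_shape_update,) = noise_shapes(img_shape=(128, 128))
update_noise = np.random.randn(4, *noise_shape_update).astype("float32")
lores = np.zeros((4, 8, 8, 8, 1), dtype="float32")               # conditioning sequence at 1/16 resolution
img_seq, last_state = gen.predict([lores, state, update_noise])  # img_seq: (4, 8, 128, 128, 1)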
Example #4
def discriminator(num_channels=1, num_timesteps=8):
    hires_in = Input(shape=(num_timesteps, None, None, num_channels),
                     name="sample_in")
    lores_in = Input(shape=(num_timesteps, None, None, num_channels),
                     name="cond_in")

    x_hr = hires_in
    x_lr = lores_in

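    # Strided blocks downsample the high-resolution stream to the conditioning resolution before the two are joined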
    block_channels = [32, 64, 128, 256]
    for (i, channels) in enumerate(block_channels):
        x_hr = res_block(channels, time_dist=True, norm="spectral",
                         stride=2)(x_hr)
        x_lr = res_block(channels, time_dist=True, norm="spectral")(x_lr)

    x_joint = Concatenate()([x_lr, x_hr])
    x_joint = res_block(256, time_dist=True, norm="spectral")(x_joint)
    x_joint = res_block(256, time_dist=True, norm="spectral")(x_joint)

    x_hr = res_block(256, time_dist=True, norm="spectral")(x_hr)
    x_hr = res_block(256, time_dist=True, norm="spectral")(x_hr)

    def disc_gate(activation='sigmoid'):
        def gate(x):
            x = ReflectionPadding2D(padding=(1, 1))(x)
            x = SNConv2D(256,
                         kernel_size=(3, 3),
                         kernel_initializer='he_uniform')(x)
            if activation is not None:
                x = Activation(activation)(x)
            return x

        return Lambda(gate)

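    # Zero tensor used as the initial recurrent state of both GRUs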
    h = Lambda(lambda x: tf.zeros_like(x[:, 0, ...]))
    x_joint = CustomGateGRU(update_gate=disc_gate(),
                            reset_gate=disc_gate(),
                            output_gate=disc_gate(activation=None),
                            return_sequences=True,
                            time_steps=num_timesteps)([x_joint,
                                                       h(x_joint)])
    x_hr = CustomGateGRU(update_gate=disc_gate(),
                         reset_gate=disc_gate(),
                         output_gate=disc_gate(activation=None),
                         return_sequences=True,
                         time_steps=num_timesteps)([x_hr, h(x_hr)])

    x_avg_joint = TimeDistributed(GlobalAveragePooling2D())(x_joint)
    x_avg_hr = TimeDistributed(GlobalAveragePooling2D())(x_hr)

    x = Concatenate()([x_avg_joint, x_avg_hr])
    x = TimeDistributed(SNDense(256))(x)
    x = LeakyReLU(0.2)(x)

    disc_out = TimeDistributed(SNDense(1))(x)

    disc = Model(inputs=[lores_in, hires_in], outputs=disc_out, name='disc')

    return disc
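
A minimal usage sketch (assuming the spectrally normalized SNConv2D/SNDense layers and res_block are importable; the 128x128 samples and 8x8 conditioning fields mirror the generator above and are assumptions):

import numpy as np

disc = discriminator(num_channels=1, num_timesteps=8)
cond = np.zeros((2, 8, 8, 8, 1), dtype="float32")        # low-resolution conditioning sequence
sample = np.zeros((2, 8, 128, 128, 1), dtype="float32")  # high-resolution sequence to score
scores = disc.predict([cond, sample])                    # (2, 8, 1): one unbounded score per timestep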
Example #5
    def build_G(self, x_bound, x_label, x_feat, x_k, x_b):
        with tf.variable_scope('G'):

            # Fusion: scale and shift the feature map, then concatenate it with the boundary and label maps
            x_feat_act = tf.add(tf.multiply(x_feat, x_k), x_b)
            x_concat = tf.concat([x_bound, x_label, x_feat_act], 3)

            # Downsample the fused input to half resolution for the coarse branch G1
            input_downsampled = tf.nn.avg_pool(x_concat,
                                               ksize=[1, 3, 3, 1],
                                               strides=[1, 2, 2, 1],
                                               padding="SAME")

            # G1
            _, G1_relu_up4 = G_base('G1', input_downsampled, self.batch)

            # G2_1
            G2_1_conv1 = conv('G2_1_conv1', x_concat, 7 * 7, 64, 1, None, True)
            G2_1_ins1 = ins_norm('G2_1_ins1', G2_1_conv1)
            G2_1_relu1 = relu('G2_1_relu1', G2_1_ins1)

            G2_1_conv2 = conv('G2_1_conv2', G2_1_relu1, 3 * 3, 128, 2, 1,
                              False)
            G2_1_ins2 = ins_norm('G2_1_ins2', G2_1_conv2)
            G2_1_relu2 = relu('G2_1_relu2', G2_1_ins2)

            # Fuse the output of G1 with the output of G2_1 (128 channels)
            G_add = tf.add(G1_relu_up4, G2_1_relu2, name='G_Add')

            # G2_2
            # res_block
            for i in range(3):
                name = 'G2_2_res' + str(i + 1)
                G_add = res_block(name, G_add, channels=128)
            # Upsample back to full resolution with a transposed convolution
            G2_2_trans = conv_trans('G2_2_trans', G_add, 3 * 3, 64, 2,
                                    self.batch, True)
            G2_2_ins2 = ins_norm('G2_2_ins2', G2_2_trans)
            G2_2_relu2 = relu('G2_2_relu2', G2_2_ins2)

            # final convolution
            G2_2_conv_end = conv('G2_2_conv_end', G2_2_relu2, 7 * 7, 3, 1,
                                 None, True)
            G2_2_tanh_end = tanh('G2_2_tanh_end', G2_2_conv_end)

            return G2_2_tanh_end
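
A hypothetical call site (the enclosing object, the 256x256 spatial size and the single-channel maps are all assumptions; the five inputs only need to share the same NHWC spatial dimensions for the concatenation above to work):

x_bound = tf.placeholder(tf.float32, [None, 256, 256, 1])
x_label = tf.placeholder(tf.float32, [None, 256, 256, 1])
x_feat = tf.placeholder(tf.float32, [None, 256, 256, 1])
x_k = tf.placeholder(tf.float32, [None, 256, 256, 1])
x_b = tf.placeholder(tf.float32, [None, 256, 256, 1])
fake = net.build_G(x_bound, x_label, x_feat, x_k, x_b)  # `net` is the (hypothetical) model instance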