Example #1
0
 def to_image(inp, style, channels, image_size):
     """Project feature maps to an image and upsample to the target size.

     A demodulation-free 1x1 Conv2DMod maps `inp` to `channels` output
     channels under `style`, then a Lambda wrapping
     Generator.upsample_to_size resizes to `image_size`.

     NOTE(review): the initializer scale uses only inp.shape[2], so
     behaviour for rectangular images is unverified (original comment).
     """
     rgb = Conv2DMod(
         channels,
         1,
         kernel_initializer=VarianceScaling(200 / inp.shape[2]),
         demod=False)([inp, style])
     resize = Lambda(
         Generator.upsample_to_size,
         output_shape=[None, image_size[0], image_size[1], None])
     return resize([rgb, image_size])
Example #2
0
 def c(i, style, noise, l, l_):
     """Same-resolution block: two style-modulated 3x3 convs, each with an
     additive 1x1-projected noise branch followed by the activation.

     NOTE(review): `self`, `layer_dict`, `input_shape`, `conv` and
     `act_layer` are captured from the enclosing scope; `l` is unused but
     kept for signature parity with up()/down().
     """
     filters = layer_dict[l_] * self.filter_num
     out = Conv2DMod(filters, kernel_size=3, strides=1)([i, style])
     # Center-crop the full-size noise map down to resolution l_.
     cropped = keras.layers.Cropping2D((input_shape[0] - l_) // 2)(noise)
     out = keras.layers.Add()(
         [out, conv(filters, kernel_size=1, strides=1)(cropped)])
     out = act_layer(out)
     out = Conv2DMod(filters, kernel_size=3, strides=1)([out, style])
     out = keras.layers.Add()(
         [out, conv(filters, kernel_size=1, strides=1)(cropped)])
     out = act_layer(out)
     return out
Example #3
0
    def _generator_block(inp,
                         input_style,
                         input_noise,
                         filters,
                         final_image_size,
                         upsample=True,
                         channels=3):
        """One StyleGAN2-style generator block.

        Optionally upsamples `inp` (bilinear, 2x), applies two
        style-modulated 3x3 convolutions with per-conv noise injection,
        and returns both the new feature map and its RGB projection at
        `final_image_size`.
        """
        out = UpSampling2D(interpolation='bilinear')(inp) if upsample else inp

        # Style vector consumed by the to-image projection.
        image_style = Dense(filters,
                            kernel_initializer=VarianceScaling(
                                200 / out.shape[2]))(input_style)

        # Noise is cropped once to the current spatial size and reused by
        # both convolutions below.
        cropped_noise = Lambda(
            Generator.crop_noise_to_size)([input_noise, out])

        def _styled_conv(x, style_units):
            # Modulated 3x3 conv + zero-init noise injection + LeakyReLU.
            conv_style = Dense(style_units,
                               kernel_initializer='he_uniform')(input_style)
            conv_noise = Dense(filters,
                               kernel_initializer='zeros')(cropped_noise)
            y = Conv2DMod(filters=filters,
                          kernel_size=3,
                          padding='same',
                          kernel_initializer='he_uniform')([x, conv_style])
            y = Add()([y, conv_noise])
            return LeakyReLU(0.2)(y)

        # First conv modulates against the incoming channel count, the
        # second against this block's filter count.
        out = _styled_conv(out, inp.shape[-1])
        out = _styled_conv(out, filters)

        return out, Generator.to_image(out, image_style, channels,
                                       final_image_size)
Example #4
0
 def up(i, style, noise, l, l_):
     """Upsample `i` from resolution l to l_ (bilinear), then apply two
     style-modulated 3x3 convs, each with an additive 1x1-projected noise
     branch followed by the activation.

     NOTE(review): `self`, `layer_dict`, `input_shape`, `conv` and
     `act_layer` are captured from the enclosing scope.
     """
     filters = layer_dict[l_] * self.filter_num
     out = keras.layers.UpSampling2D(size=l_ // l,
                                     interpolation="bilinear")(i)
     out = Conv2DMod(filters, kernel_size=3, strides=1)([out, style])
     # Center-crop the full-size noise map down to resolution l_.
     cropped = keras.layers.Cropping2D((input_shape[0] - l_) // 2)(noise)
     out = keras.layers.Add()(
         [out, conv(filters, kernel_size=1, strides=1)(cropped)])
     out = act_layer(out)
     out = Conv2DMod(filters, kernel_size=3, strides=1)([out, style])
     out = keras.layers.Add()(
         [out, conv(filters, kernel_size=1, strides=1)(cropped)])
     out = act_layer(out)
     return out
Example #5
0
    def build(self, input_shape):
        """Build the mapping network, progressive generator and
        discriminator for images of shape `input_shape`.

        NOTE(review): relies on enclosing-scope helpers not visible in
        this chunk (`layer_dict`, `up`, `conv`, `act_layer`,
        `norm_layer`, `Conv2DMod`, `AugmentLayer`) — confirm their
        definitions before editing.
        """
        # Mapping network M: 8 Dense + LeakyReLU layers, latent -> style.
        self.M = keras.Sequential(
            [keras.layers.InputLayer(input_shape=(self.latent_dim, ))])
        for _ in range(8):
            self.M.add(
                keras.layers.Dense(self.latent_dim,
                                   kernel_initializer="he_normal"))
            self.M.add(keras.layers.LeakyReLU(0.2))

        # Synthesis network: one styled block per resolution in layer_dict;
        # each block's RGB projection is upsampled to full size and the
        # per-resolution RGB outputs are summed at the end.
        style_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        outs = []
        self.n = 0  # counts the per-resolution style inputs
        for i, f in layer_dict.items():
            if i == min(layer_dict.keys()):
                # Lowest resolution: seed from a Dense projection of the
                # latent reshaped to a 1x1 feature map, then upsample.
                style = keras.layers.Input(shape=(self.latent_dim, ))
                style_list.append(style)
                self.n += 1
                o = keras.layers.Dense(self.latent_dim)(inp)
                o = o[:, tf.newaxis, tf.newaxis, :]
                o = up(o, size=i)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                # Center-crop the full-size noise map to this resolution.
                noise_crop = keras.layers.Cropping2D(
                    (input_shape[0] - i) // 2)(noise)
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                # Demodulation-free 1x1 Conv2DMod acts as to-RGB.
                rgb = Conv2DMod(input_shape[-1],
                                kernel_size=1,
                                strides=1,
                                demod=False)([o, style])
                outs.append(up(rgb, size=input_shape[0] // i))
            else:
                # Higher resolutions: upsample previous features by 2 and
                # repeat the two modulated convs with noise injection.
                style = keras.layers.Input(shape=(self.latent_dim, ))
                style_list.append(style)
                self.n += 1
                o = up(o, size=2)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                noise_crop = keras.layers.Cropping2D(
                    (input_shape[0] - i) // 2)(noise)
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                rgb = Conv2DMod(input_shape[-1],
                                kernel_size=1,
                                strides=1,
                                demod=False)([o, style])
                outs.append(up(rgb, size=input_shape[0] // i))
            if i == input_shape[0]:
                # Reached the target resolution; remember the style count.
                self.m = self.n
                break
        o = keras.layers.Add()(outs)
        # o = keras.layers.Activation("tanh")(o)
        # _gen consumes already-mapped style vectors.
        self._gen = keras.Model(inputs=[inp, noise] + style_list, outputs=o)

        # gen wraps _gen so callers pass raw latents; M maps each one.
        style_list_n = []
        o_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        for _ in style_list:
            style = keras.layers.Input(shape=(self.latent_dim, ))
            style_list_n.append(style)
            o_list.append(self.M(style))
        o = self._gen([inp, noise] + o_list)
        self.gen = keras.Model(inputs=[inp, noise] + style_list_n, outputs=o)

        # Discriminator: augmentation layer, then one strided residual
        # block per resolution below the input size.
        # NOTE(review): reversed(dict.items()) requires Python >= 3.8.
        img = keras.layers.Input(shape=input_shape)
        o = AugmentLayer()(img)
        for i, f in reversed(layer_dict.items()):
            if i < input_shape[0]:
                # Strided 3x3 shortcut + main path (conv/norm/act twice).
                res = conv(f * self.filter_num, kernel_size=3, strides=2)(o)
                o = conv(f * self.filter_num, kernel_size=3, strides=2)(o)
                o = norm_layer(o)
                o = act_layer(o)
                o = conv(f * self.filter_num, kernel_size=3, strides=1)(o)
                o = norm_layer(o)
                o = keras.layers.Add()([res, o])
                o = act_layer(o)
        o = keras.layers.Flatten()(o)
        self.dis = keras.Model(inputs=img, outputs=o)
        # Gradient-penalty / path-length regularization bookkeeping.
        self.perform_gp = True
        self.perform_pl = True
        self.pl_mean = 0
        self.pl_length = 0.
Example #6
0
    def build(self, input_shape):
        """Build the mapping network, multi-scale (HRNet-like) generator
        and multi-scale discriminator for images of shape `input_shape`.

        NOTE(review): depends on enclosing-scope names not visible in
        this chunk (`layer_dict`, `conv`, `convt`, `norm_layer`,
        `act_layer`, `Conv2DMod`, `AugmentLayer`) — confirm definitions.
        """
        # Architecture assumes at least 32x32 images.
        assert input_shape[0] >= 32
        # Mapping network M: 5 Dense + LeakyReLU layers, latent -> style.
        self.M = keras.Sequential(
            [keras.layers.InputLayer(input_shape=(self.latent_dim, ))])
        for _ in range(5):
            self.M.add(
                keras.layers.Dense(self.latent_dim,
                                   kernel_initializer="he_normal"))
            self.M.add(keras.layers.LeakyReLU(0.2))

        # Generator-side fusion helpers. Each maps features `i` from
        # resolution l to l_ via two style-modulated 3x3 convs, each with
        # an additive 1x1-projected noise branch and activation.

        def up(i, style, noise, l, l_):
            # l -> l_ via bilinear upsampling (expects l_ >= l).
            o = keras.layers.UpSampling2D(size=l_ // l,
                                          interpolation="bilinear")(i)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            # Center-crop the full-size noise map to resolution l_.
            noise_crop = keras.layers.Cropping2D(
                (input_shape[0] - l_) // 2)(noise)
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            return o

        def down(i, style, noise, l, l_):
            # l -> l_ via a strided modulated conv (expects l >= l_).
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=l // l_)([i, style])
            noise_crop = keras.layers.Cropping2D(
                (input_shape[0] - l_) // 2)(noise)
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            return o

        def c(i, style, noise, l, l_):
            # Same-resolution branch (l == l_).
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([i, style])
            noise_crop = keras.layers.Cropping2D(
                (input_shape[0] - l_) // 2)(noise)
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            return o

        style_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        outs = []

        # o_dict maps resolution -> current feature map at that scale;
        # l_list tracks the scales that are active so far.
        o_dict = {}
        l_list = [min(layer_dict.keys())]
        for l, s in layer_dict.items():
            if l == min(layer_dict.keys()):
                # Seed the lowest scale from a Dense projection of the
                # latent reshaped to a 1x1 feature map.
                style = keras.layers.Input(shape=(self.latent_dim, ))
                style_list.append(style)
                o = keras.layers.Dense(self.latent_dim)(inp)
                o = o[:, tf.newaxis, tf.newaxis, :]
                o = up(o, style, noise, 1, l)
                o_dict[min(layer_dict.keys())] = o
            if l <= input_shape[0]:
                l_list.append(l)
                t_dict = dict()

                # Fuse every existing scale into every active scale, each
                # fusion path driven by its own style input.
                # NOTE(review): the inner `for l in l_list[:-1]` rebinds
                # the outer loop variable `l` — looks intentional, verify.
                for l in l_list[:-1]:
                    for l_ in l_list:
                        t_dict.setdefault(l_, [])
                        style = keras.layers.Input(shape=(self.latent_dim, ))
                        style_list.append(style)
                        if l == l_:
                            o = c(o_dict[l], style, noise, l, l_)
                            t_dict[l_].append(o)
                        elif l > l_:
                            o = down(o_dict[l], style, noise, l, l_)
                            t_dict[l_].append(o)
                        else:
                            o = up(o_dict[l], style, noise, l, l_)
                            t_dict[l_].append(o)

                # Merge fused branches per scale and mix with a 1x1 conv.
                for l, os in t_dict.items():
                    if len(os) == 1:
                        o = os[0]
                    else:
                        o = keras.layers.Add()(os)

                    o = conv(filters=layer_dict[l] * self.filter_num,
                             kernel_size=1,
                             strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)

                    o_dict[l] = o

        # To-RGB per scale (demod-free 1x1 Conv2DMod), upsampled to full
        # size; the scale outputs are summed into the final image.
        for l, o in o_dict.items():
            style = keras.layers.Input(shape=(self.latent_dim, ))
            style_list.append(style)
            rgb = Conv2DMod(input_shape[-1],
                            kernel_size=1,
                            strides=1,
                            demod=False)([o, style])
            # NOTE(review): uses input_shape[1] here while the rest of the
            # method uses input_shape[0] — confirm for non-square inputs.
            rgb = keras.layers.UpSampling2D(size=input_shape[1] // l,
                                            interpolation="bilinear")(rgb)
            outs.append(rgb)
        o = keras.layers.Add()(outs)
        # _gen consumes already-mapped style vectors.
        self._gen = keras.Model(inputs=[inp, noise] + style_list, outputs=o)

        # gen wraps _gen so callers pass raw latents; M maps each one.
        style_list_n = []
        o_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        self.n = len(style_list)
        self.m = self.n
        for _ in style_list:
            style = keras.layers.Input(shape=(self.latent_dim, ))
            style_list_n.append(style)
            o_list.append(self.M(style))
        o = self._gen([inp, noise] + o_list)
        self.gen = keras.Model(inputs=[inp, noise] + style_list_n, outputs=o)

        # Discriminator-side helpers; these shadow the generator's
        # up/down/c above for the remainder of the method.
        def up(i, l, l_):
            # l -> l_ via strided transposed conv.
            o = convt(filters=layer_dict[l_] * self.filter_num,
                      kernel_size=3,
                      strides=l_ // l)(i)
            o = norm_layer(o)
            o = act_layer(o)
            return o

        def down(i, l, l_):
            # l -> l_ via strided conv.
            o = conv(filters=layer_dict[l_] * self.filter_num,
                     kernel_size=3,
                     strides=l // l_)(i)
            o = norm_layer(o)
            o = act_layer(o)
            return o

        def c(i, l, l_):
            # Same-resolution conv branch.
            o = conv(filters=layer_dict[l_] * self.filter_num,
                     kernel_size=3,
                     strides=1)(i)
            o = norm_layer(o)
            o = act_layer(o)
            return o

        images = keras.layers.Input(shape=input_shape)
        o = AugmentLayer()(images)

        # Multi-scale discriminator: same fuse-all-scales scheme, walking
        # resolutions from high to low.
        # NOTE(review): reversed(dict.items()) requires Python >= 3.8.
        o_dict = dict()
        l_list = []
        for l, s in reversed(layer_dict.items()):
            if l == input_shape[0]:
                # Stem at the input resolution.
                o = conv(filters=layer_dict[l] * self.filter_num,
                         kernel_size=3,
                         strides=1)(o)
                o = norm_layer(o)
                o = act_layer(o)
                o_dict[l] = o
                l_list.append(l)
            if l < input_shape[0]:
                l_list.append(l)
                t_dict = dict()
                # NOTE(review): inner `l` again rebinds the outer loop
                # variable, as in the generator loop above.
                for l in l_list[:-1]:
                    for l_ in l_list:
                        t_dict.setdefault(l_, [])
                        if l == l_:
                            o = c(o_dict[l], l, l_)
                            # o = o_dict[l]
                            t_dict[l_].append(o)
                        elif l > l_:
                            o = down(o_dict[l], l, l_)
                            t_dict[l_].append(o)
                        else:
                            o = up(o_dict[l], l, l_)
                            t_dict[l_].append(o)

                # Merge fused branches per scale, then a fixed-width 1-3-1
                # conv bottleneck.
                for l, os in t_dict.items():
                    if len(os) == 1:
                        o = os[0]
                    else:
                        o = keras.layers.Add()(os)

                    o = conv(filters=64, kernel_size=1, strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)
                    o = conv(filters=64, kernel_size=3, strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)
                    o = conv(filters=64, kernel_size=1, strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)

                    o_dict[l] = o

        # Outputs: image-channel maps for every scale >= 32, plus the
        # flattened lowest-scale features as the first output.
        os = []
        for l, o in o_dict.items():
            if l >= 32:
                o = conv(filters=input_shape[-1], kernel_size=1, strides=1)(o)
                o = norm_layer(o)
                o = act_layer(o)
                os.append(o)
        o = keras.layers.Flatten()(o_dict[min(layer_dict.keys())])
        self.dis = keras.Model(inputs=images, outputs=[o] + os)
        # Gradient-penalty / path-length regularization bookkeeping.
        self.perform_gp = True
        self.perform_pl = True
        self.pl_mean = 0
        self.pl_length = 0.