Exemple #1
0
def get_model(img_shape=(128, 128, 3), filter_num=16):
    """Build an encoder/decoder model over the resolutions in ``layer_dict``.

    Downsamples from the input resolution with strided 3x3 convolutions,
    upsamples back with transposed convolutions until half the input size,
    then applies a final 256-filter transposed conv (one more 2x upsample).
    """
    inputs = keras.layers.Input(shape=img_shape)
    x = inputs

    # Encoder: walk resolutions from largest to smallest; stride 1 at the
    # input resolution, stride 2 below it, skip anything above it.
    for res, scale in reversed(layer_dict.items()):
        if res > img_shape[0]:
            continue
        x = keras.layers.Conv2D(filters=filter_num * scale,
                                kernel_size=3,
                                strides=1 if res == img_shape[0] else 2,
                                padding="SAME")(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.LeakyReLU(0.2)(x)

    # Decoder: upsample until half the input resolution is reached.
    for res, scale in layer_dict.items():
        if res == img_shape[0] // 2:
            break
        x = keras.layers.Conv2DTranspose(filters=filter_num * scale,
                                         kernel_size=3,
                                         strides=2,
                                         padding="SAME")(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.LeakyReLU(0.2)(x)

    x = keras.layers.Conv2DTranspose(filters=256,
                                     kernel_size=3,
                                     strides=2,
                                     padding="SAME")(x)
    return keras.Model(inputs=inputs, outputs=x)
Exemple #2
0
    def build(self, input_shape):
        """Construct the generator (``self.gen``) and discriminator
        (``self.dis``) as DCGAN-style conv stacks over ``layer_dict``.
        """

        def _init():
            # A fresh DCGAN-style weight initializer per layer.
            return keras.initializers.RandomNormal(mean=0.0, stddev=0.02)

        # ----- generator: latent vector -> image -----
        noise = keras.layers.Input(shape=(self.latent_dim, ))
        smallest = min(layer_dict.keys())
        for res, fmul in layer_dict.items():
            if res == smallest:
                # Project the latent vector and reshape to the coarsest grid.
                o = keras.layers.Dense(res * res * fmul * self.filter_num)(noise)
                o = keras.layers.Reshape((res, res, fmul * self.filter_num))(o)
            else:
                # Double the spatial resolution.
                o = keras.layers.Conv2DTranspose(
                    filters=fmul * self.filter_num,
                    kernel_size=5,
                    strides=2,
                    padding="SAME",
                    kernel_initializer=_init())(o)
                o = keras.layers.BatchNormalization()(o)
                o = keras.layers.LeakyReLU(0.2)(o)
            if res == input_shape[0] // 2:
                break

        # Final 2x upsample to the image size, tanh-bounded output.
        o = keras.layers.Conv2DTranspose(
            filters=input_shape[-1],
            kernel_size=5,
            strides=2,
            padding="SAME",
            kernel_initializer=_init(),
            activation="tanh")(o)
        self.gen = keras.Model(inputs=noise, outputs=o)

        # ----- discriminator: augmented image -> logit -----
        img = keras.layers.Input(shape=input_shape)
        o = AugmentLayer()(img)
        for res, fmul in reversed(layer_dict.items()):
            if res >= input_shape[0]:
                continue
            o = keras.layers.Conv2D(
                filters=fmul * self.filter_num,
                kernel_size=5,
                strides=2,
                padding="SAME",
                kernel_initializer=_init())(o)
            o = keras.layers.BatchNormalization()(o)
            o = keras.layers.LeakyReLU(0.2)(o)
        o = keras.layers.Flatten()(o)
        o = keras.layers.Dense(1)(o)

        self.dis = keras.Model(inputs=img, outputs=o)
Exemple #3
0
    def build(self, input_shape):
        """Build the generator (``self.gen``) and multi-output discriminator
        (``self.dis``).

        The generator grows a 1x1 latent tensor to the image resolution via
        alternating transposed-conv upsampling and refinement convs; the
        discriminator downsamples and also emits tanh image-like side
        outputs at every resolution >= 32.
        """
        noise = keras.layers.Input(shape=(self.latent_dim,))
        for i, f in layer_dict.items():
            if i == min(layer_dict.keys()):
                # Seed block: latent as a 1x1 map, expanded to the coarsest
                # grid with a VALID transposed conv, then refined.
                o = noise[:, tf.newaxis, tf.newaxis, :]
                o = convt(f * self.filter_num, kernel_size=min(layer_dict.keys()),
                          strides=1, padding="VALID")(o)
                o = norm_layer(o)
                o = act_layer(o)
                o = conv(f * self.filter_num, kernel_size=5, strides=1)(o)
                o = norm_layer(o)
                o = act_layer(o)
            else:
                # Upsample by 2, then refine.
                o = convt(f * self.filter_num, kernel_size=3, strides=2)(o)
                o = norm_layer(o)
                o = act_layer(o)
                o = conv(f * self.filter_num, kernel_size=5, strides=1)(o)
                o = norm_layer(o)
                o = act_layer(o)
            if i == input_shape[0]:
                break

        o = conv(input_shape[-1], kernel_size=1, strides=1)(o)
        o = keras.layers.Activation("tanh")(o)
        self.gen = keras.Model(inputs=noise, outputs=o)

        img = keras.layers.Input(shape=input_shape)
        # Fix: the augmented tensor was previously discarded by an immediate
        # `o = img` reassignment, silently bypassing AugmentLayer; feed the
        # augmented output to the discriminator as the sibling models do.
        o = AugmentLayer()(img)
        os = [o]
        for i, f in reversed(layer_dict.items()):
            if i < input_shape[0]:
                o = conv(f*self.filter_num, kernel_size=3, strides=2)(o)
                o = norm_layer(o)
                o = act_layer(o)
                o = conv(f * self.filter_num, kernel_size=5, strides=1)(o)
                o = norm_layer(o)
                o = act_layer(o)
                if i >= 32:
                    # tanh side output used as an auxiliary image-shaped head.
                    o_ = conv(input_shape[-1], kernel_size=1, strides=1)(o)
                    o_ = norm_layer(o_)
                    o_ = keras.layers.Activation("tanh")(o_)
                    os.append(o_)
        o = keras.layers.Flatten()(o)

        self.dis = keras.Model(inputs=img, outputs=[o]+os)
Exemple #4
0
def get_model(img_shape=(128, 128, 3), filter_num=16):
    """U-Net-style model: strided-conv encoder, transposed-conv decoder with
    1x1-projected additive skip connections, and a multi-scale fusion head.
    """

    def bn_lrelu(t):
        # Shared BatchNorm + LeakyReLU tail used after every conv.
        t = keras.layers.BatchNormalization()(t)
        return keras.layers.LeakyReLU(0.2)(t)

    inputs = keras.layers.Input(shape=img_shape)
    x = inputs
    feats = dict()

    # Encoder: largest resolution first; stride 1 at full size, 2 below.
    for res, scale in reversed(layer_dict.items()):
        if res > img_shape[0]:
            continue
        x = keras.layers.Conv2D(filters=filter_num * scale, kernel_size=3,
                                strides=1 if res == img_shape[0] else 2,
                                padding="SAME")(x)
        x = bn_lrelu(x)
        feats[res] = x

    # Decoder: upsample and merge the matching encoder feature map.
    for res, scale in layer_dict.items():
        if res == img_shape[0]:
            break
        x = keras.layers.Conv2DTranspose(filters=filter_num * layer_dict[res * 2],
                                         kernel_size=3, strides=2,
                                         padding="SAME")(x)
        x = bn_lrelu(x)

        skip = keras.layers.Conv2D(filters=filter_num * layer_dict[res * 2],
                                   kernel_size=1, strides=1,
                                   padding="SAME")(feats[res * 2])
        skip = bn_lrelu(skip)
        x = keras.layers.Add()([x, skip])
        feats[res * 2] = x

    # Fusion head: bring every stored scale up to full resolution and merge.
    scaled = []
    for res, t in feats.items():
        t = keras.layers.Conv2DTranspose(filters=filter_num * layer_dict[img_shape[0]],
                                         kernel_size=1,
                                         strides=img_shape[0] // res,
                                         padding="SAME")(t)
        scaled.append(bn_lrelu(t))
    x = keras.layers.Concatenate(axis=-1)(scaled)
    x = keras.layers.Conv2D(filters=256, kernel_size=1, strides=1,
                            padding="SAME")(x)
    return keras.Model(inputs=inputs, outputs=x)
Exemple #5
0
 def build(self, input_shape):
     """Build the SpectralGAN discriminator layer stack.

     NOTE(review): the final Dense(1) logit is never captured into a model
     (no ``self.dis = keras.Model(...)``) within this method — presumably
     handled elsewhere or lost in extraction; confirm against the full class.
     """
     super(SpectralGAN, self).build(input_shape)
     img = keras.layers.Input(shape=input_shape)
     o = img
     # Downsample once per resolution below the configured image size.
     for i, f in reversed(layer_dict.items()):
         if i < self.img_shape[0]:
             # Strided conv wrapped in tfa spectral normalization,
             # DCGAN-style weight init.
             o = tfa.layers.SpectralNormalization(
                 keras.layers.Conv2D(
                     filters=f * self.filter_num,
                     kernel_size=5,
                     strides=2,
                     padding="SAME",
                     kernel_initializer=keras.initializers.RandomNormal(
                         mean=0.0, stddev=0.02)))(o)
             o = keras.layers.BatchNormalization()(o)
             o = keras.layers.LeakyReLU(0.2)(o)
     # Global pooling followed by a single logit head.
     o = keras.layers.GlobalAveragePooling2D()(o)
     o = keras.layers.Dense(1)(o)
Exemple #6
0
def get_model(img_shape=(128, 128, 3), filter_num=16):
    """ResNet50-backed decoder with an attention branch.

    Splits the backbone features into an "original" and a "reversed" stream,
    derives a sigmoid attention map from the negated original stream, and
    decodes all three streams up to the image size before fusing them.
    """

    def bn_act(t):
        # Shared BatchNorm + LeakyReLU tail.
        t = keras.layers.BatchNormalization()(t)
        return keras.layers.LeakyReLU(0.2)(t)

    inputs = keras.layers.Input(shape=img_shape)
    backbone = keras.applications.ResNet50(include_top=False, weights=None,
                                           input_shape=img_shape)
    feat = backbone(inputs)
    width = filter_num * layer_dict[feat.shape[1]]

    org = bn_act(keras.layers.Conv2D(filters=width, kernel_size=1,
                                     strides=1)(feat))
    rev = bn_act(keras.layers.Conv2D(filters=width, kernel_size=1,
                                     strides=1)(feat))

    # Attention stream: gate the reversed stream by sigmoid(-org), subtract.
    att = keras.layers.Activation("sigmoid")(-org)
    att = keras.layers.Multiply()([att, rev])
    att = keras.layers.Subtract()([org, att])

    # Negate the reversed stream before decoding it.
    rev = -rev
    # Decode each stream from the backbone resolution up to the image size.
    for res, _ in layer_dict.items():
        if res == img_shape[0]:
            break
        if res >= feat.shape[1]:
            up_filters = filter_num * layer_dict[res * 2]
            org = bn_act(keras.layers.Conv2DTranspose(
                filters=up_filters, kernel_size=3,
                strides=2, padding="SAME")(org))
            rev = bn_act(keras.layers.Conv2DTranspose(
                filters=up_filters, kernel_size=3,
                strides=2, padding="SAME")(rev))
            att = bn_act(keras.layers.Conv2DTranspose(
                filters=up_filters, kernel_size=3,
                strides=2, padding="SAME")(att))

    out = keras.layers.Concatenate(axis=-1)([org, rev, att])
    out = keras.layers.Conv2DTranspose(filters=256, kernel_size=1,
                                       strides=1, padding="SAME")(out)
    return keras.Model(inputs=inputs, outputs=out)
Exemple #7
0
def get_model(img_shape=(128, 128, 3), filter_num=16):
    """Encoder + ASPP (atrous spatial pyramid pooling) + decoder model.

    After the strided-conv encoder, parallel dilated convolutions (rates
    1/6/12/18) and a pooled image branch are fused; the decoder upsamples
    back to half resolution and a final 256-filter transposed conv doubles
    once more.
    """

    def bn_lrelu(t):
        # Shared BatchNorm + LeakyReLU tail used after every conv.
        t = keras.layers.BatchNormalization()(t)
        return keras.layers.LeakyReLU(0.2)(t)

    inputs = keras.layers.Input(shape=img_shape)
    x = inputs

    # Encoder: stride 1 at the input resolution, stride 2 below it.
    for res, scale in reversed(layer_dict.items()):
        if res > img_shape[0]:
            continue
        x = keras.layers.Conv2D(filters=filter_num * scale,
                                kernel_size=3,
                                strides=1 if res == img_shape[0] else 2,
                                padding="SAME")(x)
        x = bn_lrelu(x)

    # Bottleneck dilated conv.
    x = bn_lrelu(keras.layers.Conv2D(
        filters=filter_num * layer_dict[x.shape[1]],
        kernel_size=3,
        strides=1,
        dilation_rate=2,
        padding="SAME")(x))

    # Image-level branch: pool the raw input down to the bottleneck size.
    pooled = keras.layers.AveragePooling2D(
        pool_size=img_shape[0] // x.shape[1])(inputs)

    width = filter_num * layer_dict[x.shape[1]]

    # ASPP branches over the bottleneck features.
    b1 = bn_lrelu(keras.layers.Conv2D(filters=width,
                                      kernel_size=1,
                                      strides=1,
                                      padding="SAME")(x))
    b6 = bn_lrelu(keras.layers.Conv2D(filters=width,
                                      kernel_size=3,
                                      strides=1,
                                      dilation_rate=6,
                                      padding="SAME")(x))
    b12 = bn_lrelu(keras.layers.Conv2D(filters=width,
                                       kernel_size=3,
                                       strides=1,
                                       dilation_rate=12,
                                       padding="SAME")(x))
    b18 = bn_lrelu(keras.layers.Conv2D(filters=width,
                                       kernel_size=3,
                                       strides=1,
                                       dilation_rate=18,
                                       padding="SAME")(x))

    merged = keras.layers.Concatenate(axis=-1)([pooled, b1, b6, b12, b18])
    x = bn_lrelu(keras.layers.Conv2D(
        filters=filter_num * layer_dict[merged.shape[1]],
        kernel_size=1,
        strides=1,
        padding="SAME")(merged))

    # Decoder: upsample until half the input resolution remains. Filter
    # count is recomputed from the CURRENT tensor's spatial size each step.
    for res, scale in layer_dict.items():
        if res == img_shape[0] // 2:
            break
        x = bn_lrelu(keras.layers.Conv2DTranspose(
            filters=filter_num * layer_dict[x.shape[1]],
            kernel_size=1,
            strides=2,
            padding="SAME")(x))

    x = keras.layers.Conv2DTranspose(filters=256,
                                     kernel_size=1,
                                     strides=2,
                                     padding="SAME")(x)
    return keras.Model(inputs=inputs, outputs=x)
Exemple #8
0
    def build(self, input_shape):
        """Build a StyleGAN2-flavoured generator (``self.gen``) and a
        residual discriminator (``self.dis``).

        The generator sums per-resolution RGB heads; raw style vectors are
        passed through the mapping network ``self.M`` before synthesis.
        """
        # Mapping network: 8 dense layers turning z into the style space.
        self.M = keras.Sequential(
            [keras.layers.InputLayer(input_shape=(self.latent_dim, ))])
        for _ in range(8):
            self.M.add(
                keras.layers.Dense(self.latent_dim,
                                   kernel_initializer="he_normal"))
            self.M.add(keras.layers.LeakyReLU(0.2))

        # Synthesis network (self._gen): one style input per resolution;
        # the noise image is center-cropped to each resolution and injected
        # after every modulated conv.
        style_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        outs = []
        self.n = 0  # count of style inputs created
        for i, f in layer_dict.items():
            if i == min(layer_dict.keys()):
                # Coarsest block: project the latent, reshape to 1x1, and
                # upsample to the first grid.
                style = keras.layers.Input(shape=(self.latent_dim, ))
                style_list.append(style)
                self.n += 1
                o = keras.layers.Dense(self.latent_dim)(inp)
                o = o[:, tf.newaxis, tf.newaxis, :]
                o = up(o, size=i)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                noise_crop = keras.layers.Cropping2D(
                    (input_shape[0] - i) // 2)(noise)
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                # Un-demodulated RGB head, upsampled to the full image size
                # so every head can be summed at the end.
                rgb = Conv2DMod(input_shape[-1],
                                kernel_size=1,
                                strides=1,
                                demod=False)([o, style])
                outs.append(up(rgb, size=input_shape[0] // i))
            else:
                # Doubling block: same structure after a 2x upsample.
                style = keras.layers.Input(shape=(self.latent_dim, ))
                style_list.append(style)
                self.n += 1
                o = up(o, size=2)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                noise_crop = keras.layers.Cropping2D(
                    (input_shape[0] - i) // 2)(noise)
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                o = Conv2DMod(f * self.filter_num, kernel_size=3,
                              strides=1)([o, style])
                o_n = conv(f * self.filter_num, kernel_size=1,
                           strides=1)(noise_crop)
                o = keras.layers.Add()([o, o_n])
                o = act_layer(o)
                rgb = Conv2DMod(input_shape[-1],
                                kernel_size=1,
                                strides=1,
                                demod=False)([o, style])
                outs.append(up(rgb, size=input_shape[0] // i))
            if i == input_shape[0]:
                self.m = self.n
                break
        # Final image: sum of all per-resolution RGB heads.
        o = keras.layers.Add()(outs)
        # o = keras.layers.Activation("tanh")(o)
        self._gen = keras.Model(inputs=[inp, noise] + style_list, outputs=o)

        # Public generator: map each raw style through self.M, then run the
        # synthesis network on the mapped styles.
        style_list_n = []
        o_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        for _ in style_list:
            style = keras.layers.Input(shape=(self.latent_dim, ))
            style_list_n.append(style)
            o_list.append(self.M(style))
        o = self._gen([inp, noise] + o_list)
        self.gen = keras.Model(inputs=[inp, noise] + style_list_n, outputs=o)

        # Discriminator: augmented input through residual downsample blocks.
        img = keras.layers.Input(shape=input_shape)
        o = AugmentLayer()(img)
        for i, f in reversed(layer_dict.items()):
            if i < input_shape[0]:
                # Strided shortcut plus a conv path, merged residually.
                res = conv(f * self.filter_num, kernel_size=3, strides=2)(o)
                o = conv(f * self.filter_num, kernel_size=3, strides=2)(o)
                o = norm_layer(o)
                o = act_layer(o)
                o = conv(f * self.filter_num, kernel_size=3, strides=1)(o)
                o = norm_layer(o)
                o = keras.layers.Add()([res, o])
                o = act_layer(o)
        o = keras.layers.Flatten()(o)
        self.dis = keras.Model(inputs=img, outputs=o)
        # Flags/state consumed by the training loop (gradient penalty and
        # path-length regularization).
        self.perform_gp = True
        self.perform_pl = True
        self.pl_mean = 0
        self.pl_length = 0.
Exemple #9
0
def get_model(img_shape=(128, 128, 3), filter_num=16, version="v1"):
    """HRNet-style model that maintains parallel multi-resolution streams
    and repeatedly cross-fuses them.

    Args:
        img_shape: input shape (H, W, C); H must be a key of ``layer_dict``.
        filter_num: base channel multiplier.
        version: "v1" puts the head on the full-resolution stream only;
            "v2" upsamples every stream to full size and concatenates first.

    Returns:
        A ``keras.Model`` mapping the image input to a 256-channel map.

    Raises:
        ValueError: if ``version`` is not "v1" or "v2".
    """
    inputs = keras.layers.Input(shape=(img_shape))
    o = inputs
    o_dict = dict()
    l_list = []
    for l, s in reversed(layer_dict.items()):
        if l == img_shape[0]:
            # Stem: open the full-resolution stream.
            o = keras.layers.Conv2D(filters=filter_num * s, kernel_size=3, strides=1, padding="SAME")(o)
            o = keras.layers.BatchNormalization()(o)
            o = keras.layers.LeakyReLU(0.2)(o)
            o_dict[l] = o
            l_list.append(l)

        if l < img_shape[0]:
            # Activate a new (lower) resolution, then fuse every existing
            # stream (src) into every active resolution (dst).
            l_list.append(l)
            t_dict = dict()
            # src/dst replace the original's reuse of `l`, which shadowed
            # the outer loop variable (harmless here but error-prone).
            for src in l_list[:-1]:
                for dst in l_list:
                    t_dict.setdefault(dst, [])
                    if src == dst:
                        o = keras.layers.Conv2D(filters=filter_num * layer_dict[dst], kernel_size=3, strides=1,
                                                padding="SAME")(o_dict[src])
                        o = keras.layers.BatchNormalization()(o)
                        o = keras.layers.LeakyReLU(0.2)(o)
                        t_dict[dst].append(o)
                    elif src > dst:
                        # Downsample src -> dst with a strided conv.
                        o = keras.layers.Conv2D(filters=filter_num * layer_dict[dst], kernel_size=3, strides=src // dst,
                                                padding="SAME")(o_dict[src])
                        o = keras.layers.BatchNormalization()(o)
                        o = keras.layers.LeakyReLU(0.2)(o)
                        t_dict[dst].append(o)
                    else:
                        # Upsample src -> dst with a transposed conv.
                        o = keras.layers.Conv2DTranspose(filters=filter_num * layer_dict[dst], kernel_size=3,
                                                         strides=dst // src, padding="SAME")(o_dict[src])
                        o = keras.layers.BatchNormalization()(o)
                        o = keras.layers.LeakyReLU(0.2)(o)
                        t_dict[dst].append(o)

            # Merge each resolution's fused inputs and refine.
            for dst, fused in t_dict.items():
                if len(fused) == 1:
                    o = fused[0]
                else:
                    o = keras.layers.Concatenate(axis=-1)(fused)

                o = keras.layers.Conv2D(filters=filter_num * layer_dict[dst], kernel_size=3, strides=1, padding="SAME")(o)
                o = keras.layers.BatchNormalization()(o)
                o = keras.layers.LeakyReLU(0.2)(o)

                o_dict[dst] = o
    if version == "v1":
        o = keras.layers.Conv2D(256, kernel_size=3, strides=1, padding="SAME")(o_dict[img_shape[0]])
    elif version == "v2":
        # Upsample every stream to full resolution and concatenate them.
        heads = []
        for res, stream in o_dict.items():
            o_ = keras.layers.Conv2DTranspose(filters=filter_num * layer_dict[res], kernel_size=3, strides=img_shape[0]//res, padding="SAME")(stream)
            o_ = keras.layers.BatchNormalization()(o_)
            o_ = keras.layers.LeakyReLU(0.2)(o_)
            heads.append(o_)
        o = keras.layers.Concatenate(axis=-1)(heads)
        o = keras.layers.Conv2D(256, kernel_size=3, strides=1, padding="SAME")(o)
    else:
        # ValueError is the idiomatic exception for a bad argument value
        # (was a bare Exception); still caught by `except Exception`.
        raise ValueError("wrong version")
    return keras.Model(inputs=inputs, outputs=o)
Exemple #10
0
    def build(self, input_shape):
        """Build a multi-resolution, cross-fused (HRNet-style) StyleGAN
        variant: ``self.gen`` (style-mapped generator) and ``self.dis``
        (discriminator with auxiliary image-shaped outputs).
        """
        assert input_shape[0] >= 32
        # Mapping network: 5 dense layers turning z into the style space.
        self.M = keras.Sequential(
            [keras.layers.InputLayer(input_shape=(self.latent_dim, ))])
        for _ in range(5):
            self.M.add(
                keras.layers.Dense(self.latent_dim,
                                   kernel_initializer="he_normal"))
            self.M.add(keras.layers.LeakyReLU(0.2))

        # Generator fusion ops: each maps a feature tensor at resolution `l`
        # to resolution `l_`, using style-modulated convs plus noise injected
        # from a center crop of the full-size noise input.

        def up(i, style, noise, l, l_):
            # Bilinear upsample l -> l_, then two modulated convs with
            # noise injection.
            o = keras.layers.UpSampling2D(size=l_ // l,
                                          interpolation="bilinear")(i)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            noise_crop = keras.layers.Cropping2D(
                (input_shape[0] - l_) // 2)(noise)
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            return o

        def down(i, style, noise, l, l_):
            # Downsample l -> l_ with a strided modulated conv, then refine.
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=l // l_)([i, style])
            noise_crop = keras.layers.Cropping2D(
                (input_shape[0] - l_) // 2)(noise)
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            return o

        def c(i, style, noise, l, l_):
            # Same-resolution refinement (callers use it when l == l_).
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([i, style])
            noise_crop = keras.layers.Cropping2D(
                (input_shape[0] - l_) // 2)(noise)
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            o = Conv2DMod(layer_dict[l_] * self.filter_num,
                          kernel_size=3,
                          strides=1)([o, style])
            o_n = conv(layer_dict[l_] * self.filter_num,
                       kernel_size=1,
                       strides=1)(noise_crop)
            o = keras.layers.Add()([o, o_n])
            o = act_layer(o)
            return o

        # Synthesis graph: start from the latent at the coarsest grid, then
        # at each step add the next resolution and cross-fuse every existing
        # stream into every active resolution (one style input per edge).
        style_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        outs = []

        o_dict = {}
        l_list = [min(layer_dict.keys())]
        for l, s in layer_dict.items():
            if l == min(layer_dict.keys()):
                # Seed: project the latent and grow it to the coarsest grid.
                style = keras.layers.Input(shape=(self.latent_dim, ))
                style_list.append(style)
                o = keras.layers.Dense(self.latent_dim)(inp)
                o = o[:, tf.newaxis, tf.newaxis, :]
                o = up(o, style, noise, 1, l)
                o_dict[min(layer_dict.keys())] = o
            if l <= input_shape[0]:
                l_list.append(l)
                t_dict = dict()

                # Cross-fusion: every source resolution feeds every active
                # destination resolution with its own style input.
                for l in l_list[:-1]:
                    for l_ in l_list:
                        t_dict.setdefault(l_, [])
                        style = keras.layers.Input(shape=(self.latent_dim, ))
                        style_list.append(style)
                        if l == l_:
                            o = c(o_dict[l], style, noise, l, l_)
                            t_dict[l_].append(o)
                        elif l > l_:
                            o = down(o_dict[l], style, noise, l, l_)
                            t_dict[l_].append(o)
                        else:
                            o = up(o_dict[l], style, noise, l, l_)
                            t_dict[l_].append(o)

                # Merge fused contributions per resolution and project.
                for l, os in t_dict.items():
                    if len(os) == 1:
                        o = os[0]
                    else:
                        o = keras.layers.Add()(os)

                    o = conv(filters=layer_dict[l] * self.filter_num,
                             kernel_size=1,
                             strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)

                    o_dict[l] = o

        # One RGB head per resolution, upsampled to full size and summed.
        # NOTE(review): uses input_shape[1] (width) for the upsample factor
        # where the rest of the method uses input_shape[0] — equivalent
        # only for square inputs; confirm.
        for l, o in o_dict.items():
            style = keras.layers.Input(shape=(self.latent_dim, ))
            style_list.append(style)
            rgb = Conv2DMod(input_shape[-1],
                            kernel_size=1,
                            strides=1,
                            demod=False)([o, style])
            rgb = keras.layers.UpSampling2D(size=input_shape[1] // l,
                                            interpolation="bilinear")(rgb)
            outs.append(rgb)
        o = keras.layers.Add()(outs)
        self._gen = keras.Model(inputs=[inp, noise] + style_list, outputs=o)

        # Public generator: route each raw style through the mapping network
        # before the synthesis network.
        style_list_n = []
        o_list = []
        inp = keras.layers.Input(shape=(self.latent_dim, ))
        noise = keras.layers.Input(shape=input_shape)
        self.n = len(style_list)
        self.m = self.n
        for _ in style_list:
            style = keras.layers.Input(shape=(self.latent_dim, ))
            style_list_n.append(style)
            o_list.append(self.M(style))
        o = self._gen([inp, noise] + o_list)
        self.gen = keras.Model(inputs=[inp, noise] + style_list_n, outputs=o)

        # Discriminator variants of the fusion ops (plain convs, no style,
        # no noise injection) — these rebind the names up/down/c above.

        def up(i, l, l_):
            o = convt(filters=layer_dict[l_] * self.filter_num,
                      kernel_size=3,
                      strides=l_ // l)(i)
            o = norm_layer(o)
            o = act_layer(o)
            return o

        def down(i, l, l_):
            o = conv(filters=layer_dict[l_] * self.filter_num,
                     kernel_size=3,
                     strides=l // l_)(i)
            o = norm_layer(o)
            o = act_layer(o)
            return o

        def c(i, l, l_):
            o = conv(filters=layer_dict[l_] * self.filter_num,
                     kernel_size=3,
                     strides=1)(i)
            o = norm_layer(o)
            o = act_layer(o)
            return o

        # Discriminator graph: mirrors the generator's cross-fusion, walking
        # resolutions from the image size downwards on the augmented input.
        images = keras.layers.Input(shape=input_shape)
        o = AugmentLayer()(images)

        o_dict = dict()
        l_list = []
        for l, s in reversed(layer_dict.items()):
            if l == input_shape[0]:
                # Stem: open the full-resolution stream.
                o = conv(filters=layer_dict[l] * self.filter_num,
                         kernel_size=3,
                         strides=1)(o)
                o = norm_layer(o)
                o = act_layer(o)
                o_dict[l] = o
                l_list.append(l)
            if l < input_shape[0]:
                l_list.append(l)
                t_dict = dict()
                for l in l_list[:-1]:
                    for l_ in l_list:
                        t_dict.setdefault(l_, [])
                        if l == l_:
                            o = c(o_dict[l], l, l_)
                            # o = o_dict[l]
                            t_dict[l_].append(o)
                        elif l > l_:
                            o = down(o_dict[l], l, l_)
                            t_dict[l_].append(o)
                        else:
                            o = up(o_dict[l], l, l_)
                            t_dict[l_].append(o)

                # Merge and refine each resolution with a 1-3-1 conv stack.
                for l, os in t_dict.items():
                    if len(os) == 1:
                        o = os[0]
                    else:
                        o = keras.layers.Add()(os)

                    o = conv(filters=64, kernel_size=1, strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)
                    o = conv(filters=64, kernel_size=3, strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)
                    o = conv(filters=64, kernel_size=1, strides=1)(o)
                    o = norm_layer(o)
                    o = act_layer(o)

                    o_dict[l] = o

        # Outputs: flattened coarsest features plus auxiliary image-shaped
        # outputs at every resolution >= 32.
        os = []
        for l, o in o_dict.items():
            if l >= 32:
                o = conv(filters=input_shape[-1], kernel_size=1, strides=1)(o)
                o = norm_layer(o)
                o = act_layer(o)
                os.append(o)
        o = keras.layers.Flatten()(o_dict[min(layer_dict.keys())])
        self.dis = keras.Model(inputs=images, outputs=[o] + os)
        # Flags/state consumed by the training loop (gradient penalty and
        # path-length regularization).
        self.perform_gp = True
        self.perform_pl = True
        self.pl_mean = 0
        self.pl_length = 0.