Example #1
    def __init__(self,
                 passes,
                 backbone_out_channels,
                 outs_channels,
                 depth,
                 growth_rate,
                 use_bn,
                 in_channels=3,
                 in_size=(256, 256),
                 data_format="channels_last",
                 **kwargs):
        super(IbpPose, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format
        activation = nn.LeakyReLU(alpha=0.01)

        self.backbone = IbpBackbone(
            in_channels=in_channels,
            out_channels=backbone_out_channels,
            activation=activation,
            data_format=data_format,
            name="backbone")

        self.decoder = SimpleSequential(name="decoder")
        for i in range(passes):
            merge = (i != passes - 1)
            self.decoder.add(IbpPass(
                channels=backbone_out_channels,
                mid_channels=outs_channels,
                depth=depth,
                growth_rate=growth_rate,
                merge=merge,
                use_bn=use_bn,
                activation=activation,
                data_format=data_format,
                name="pass{}".format(i + 1)))
Example #2
    def __init__(self, hparams, *args, **kwargs):
        super(DeepCNN, self).__init__(*args, **kwargs)
        self.layers_count = hparams[config.HP_DEEP_LAYERS]
        self.dual_output = hparams[config.HP_LOSS_TYPE] == "DUAL_BCE"
        self.input_len = 500

        self.down_res_layers = [
            ChannelDownResLayer(
                hparams[config.HP_DEEP_CHANNELS] * ((l + 3) // 2),
                kernel_size=max(3,
                                hparams[config.HP_DEEP_KERNEL_SIZE] - 2 * l))
            for l in range(self.layers_count - 1)
        ]
        self.down_res_layer_final = ChannelDownResLayer(
            hparams[config.HP_DEEP_CHANNELS] * ((self.layers_count + 2) // 2),
            kernel_size=max(
                3, hparams[config.HP_DEEP_KERNEL_SIZE] - 2 *
                (self.layers_count - 1)),
            last_layer=True)

        self.feature_pool = layers.GlobalAveragePooling1D()
        self.lrelu_out = layers.LeakyReLU()

        if hparams[config.HP_LOSS_TYPE] == "MSE":
            activation = None
        else:
            activation = 'sigmoid'

        self.dense_out = layers.Dense(units=2, activation=activation)

        self.dense_out_a = layers.Dense(units=1,
                                        activation=activation,
                                        name="arousal_class")
        self.dense_out_v = layers.Dense(units=1,
                                        activation=activation,
                                        name="valence_class")
Example #3
def pix2pix_discriminator(input_shape=(None, None, 3), norm_type="batch"):
    initializer = tf.random_normal_initializer(0.0, 0.02)

    input_image = layers.Input(shape=input_shape, name="input_image")
    target_image = layers.Input(shape=input_shape, name="target_image")
    x = layers.concatenate([input_image, target_image])

    x = downscale(x, 64, 4, apply_norm=False)
    x = downscale(x, 128, 4, norm_type=norm_type)
    x = downscale(x, 256, 4, norm_type=norm_type)

    x = layers.ZeroPadding2D()(x)
    x = layers.Conv2D(
        filters=512,
        kernel_size=4,
        strides=1,
        kernel_initializer=initializer,
        use_bias=False,
    )(x)

    if norm_type == "batch":
        x = layers.BatchNormalization()(x)
    elif norm_type == "instance":
        x = tfa.layers.InstanceNormalization()(x)
    else:
        raise ValueError(f"Norm type not recognized: {norm_type}")

    x = layers.LeakyReLU()(x)
    x = layers.ZeroPadding2D()(x)

    markov_rf = layers.Conv2D(filters=1,
                              kernel_size=4,
                              strides=1,
                              kernel_initializer=initializer)(x)

    return keras.Model(inputs=[input_image, target_image], outputs=markov_rf)
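pix2pix_discriminator relies on a downscale helper defined elsewhere. A minimal sketch matching the call sites above, assuming the usual pix2pix downsampling block (strided Conv2D, optional normalization, LeakyReLU) and the same tf/layers/tfa imports as the snippet:

def downscale(x, filters, size, norm_type="batch", apply_norm=True):
    initializer = tf.random_normal_initializer(0.0, 0.02)
    x = layers.Conv2D(filters, size, strides=2, padding="same",
                      kernel_initializer=initializer, use_bias=False)(x)
    if apply_norm:
        if norm_type == "batch":
            x = layers.BatchNormalization()(x)
        elif norm_type == "instance":
            x = tfa.layers.InstanceNormalization()(x)
    return layers.LeakyReLU()(x)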
Example #4
def ConvNet1D(in_shape, name):
    def temporal_convolution_block(block, filters, kernel_size, inputs):
        x = layers.Conv1D(filters=filters,
                          kernel_size=kernel_size,
                          padding='VALID',
                          name='{}_temporal_conv_1'.format(block))(inputs)
        x = layers.TimeDistributed(layers.BatchNormalization(),
                                   name='{}_bn_1'.format(block))(x)
        x = layers.Conv1D(filters=filters,
                          kernel_size=kernel_size,
                          padding='VALID',
                          name='{}_temporal_conv_2'.format(block))(x)
        x = layers.TimeDistributed(layers.BatchNormalization(),
                                   name='{}_bn_2'.format(block))(x)
        out = layers.TimeDistributed(layers.LeakyReLU(alpha=0.01),
                                     name='{}_leaky_relu'.format(block))(x)
        return out

    inputs = layers.Input(in_shape)
    x = layers.TimeDistributed(layers.GlobalAveragePooling2D(),
                               name='global_avg_pool')(inputs)
    x = temporal_convolution_block('block_1', 10, 3, x)
    x = temporal_convolution_block('block_2', 10, 3, x)
    x = temporal_convolution_block('block_3', 10, 3, x)
    x = temporal_convolution_block('block_4', 10, 3, x)
    x = layers.Conv1D(filters=100,
                      kernel_size=4,
                      padding='VALID',
                      name='temporal_conv')(x)
    x = layers.TimeDistributed(layers.BatchNormalization(),
                               name='batch_norm')(x)
    x = layers.TimeDistributed(layers.LeakyReLU(alpha=0.01),
                               name='out_leaky_relu')(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.4)(x)  # for the following fully-connected layers
    return tf.keras.Model(inputs, x, name=name)
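Because of the TimeDistributed(GlobalAveragePooling2D()) front end, in_shape must be (time, height, width, channels). The stacked 'VALID' convolutions shorten the time axis by 19 in total (4 blocks x 2 convs x 2, plus 3 for the final kernel-4 conv), so time must be at least 20. A usage sketch with assumed, illustrative dimensions:

model = ConvNet1D(in_shape=(20, 64, 64, 3), name="temporal_convnet")
model.summary()  # final temporal length: 20 - 19 = 1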
Example #5
 def __init__(self, channel=64, increment=32, alpha=0.2):
     super(RRDenseBlock, self).__init__()
     self.alpha = alpha
     self.conv1 = layers.Conv2D(filters=increment,
                                kernel_size=3,
                                strides=1,
                                padding='same')
     self.conv2 = layers.Conv2D(filters=increment,
                                kernel_size=3,
                                strides=1,
                                padding='same')
     self.conv3 = layers.Conv2D(filters=increment,
                                kernel_size=3,
                                strides=1,
                                padding='same')
     self.conv4 = layers.Conv2D(filters=increment,
                                kernel_size=3,
                                strides=1,
                                padding='same')
     self.lrelu = layers.LeakyReLU(alpha=0.2)
     self.convOut = layers.Conv2D(filters=channel,
                                  kernel_size=3,
                                  strides=1,
                                  padding='same')
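The snippet defines RRDenseBlock's layers but omits its call method. A sketch of the usual wiring for a residual dense block of this shape (each conv sees the concatenation of all previous features; the output is scaled by alpha and added back), as in ESRGAN; this is an assumption, not the project's code, and it presumes tf and layers are imported:

    def call(self, x):
        c1 = self.lrelu(self.conv1(x))
        c2 = self.lrelu(self.conv2(tf.concat([x, c1], axis=-1)))
        c3 = self.lrelu(self.conv3(tf.concat([x, c1, c2], axis=-1)))
        c4 = self.lrelu(self.conv4(tf.concat([x, c1, c2, c3], axis=-1)))
        out = self.convOut(tf.concat([x, c1, c2, c3, c4], axis=-1))
        return x + self.alpha * out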
Example #6
    def __init__(self, initChannel=64, layerNum=5):
        super(Discriminator, self).__init__()
        self.conv1 = layers.Conv2D(filters=64,
                                   kernel_size=(3, 3),
                                   strides=(1, 1),
                                   padding='same')
        self.lrelu = layers.LeakyReLU(alpha=0.2)
        self.convBlocks = []
        for i in range(1, layerNum + 1):
            self.convBlocks.append(
                ConvBlock(filterNum=initChannel * i,
                          kernelSize=2,
                          strideSize=1,
                          padding='same'))
            self.convBlocks.append(
                ConvBlock(filterNum=initChannel * i,
                          kernelSize=2,
                          strideSize=2,
                          padding='same'))

        # self.flatten = layers.GlobalAveragePooling2D()
        self.flatten = layers.Flatten()
        self.dense1 = layers.Dense(512, activation='relu')
        self.dense2 = layers.Dense(1)  # output is logit
Example #7
def Discriminator(HEIGHT, WIDTH, alpha):
    initializer = tf.random_normal_initializer(0., 0.02)
    gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)

    inp = layers.Input(shape=[HEIGHT, WIDTH, 3], name='input_image')

    x = inp

    down1 = downsample(64, 4, alpha, False)(x)  # (bs, 128, 128, 64)
    down2 = downsample(128, 4, alpha)(down1)  # (bs, 64, 64, 128)
    down3 = downsample(256, 4, alpha)(down2)  # (bs, 32, 32, 256)

    attention = self_attention(256)
    att = attention(down3)

    zero_pad1 = layers.ZeroPadding2D()(att)  # (bs, 34, 34, 256)
    conv = layers.Conv2D(512,
                         4,
                         strides=1,
                         kernel_initializer=initializer,
                         use_bias=False)(zero_pad1)  # (bs, 31, 31, 512)

    norm1 = tfa.layers.InstanceNormalization(
        gamma_initializer=gamma_init)(conv)

    leaky_relu = layers.LeakyReLU(alpha)(norm1)

    zero_pad2 = layers.ZeroPadding2D()(leaky_relu)  # (bs, 33, 33, 512)

    attention1 = self_attention(512)
    att1 = attention1(zero_pad2)

    last = layers.Conv2D(1, 4, strides=1, kernel_initializer=initializer)(
        att1)  # (bs, 30, 30, 1)

    return tf.keras.Model(inputs=inp, outputs=last)
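This Discriminator assumes downsample and self_attention helpers. self_attention is a SAGAN-style attention block and too long to reconstruct here; a minimal sketch of downsample, inferred from the call sites (filters, kernel size, LeakyReLU slope, optional instance norm), assuming the same tf/layers/tfa imports:

def downsample(filters, size, alpha, apply_norm=True):
    initializer = tf.random_normal_initializer(0., 0.02)
    block = tf.keras.Sequential()
    block.add(layers.Conv2D(filters, size, strides=2, padding='same',
                            kernel_initializer=initializer, use_bias=False))
    if apply_norm:
        block.add(tfa.layers.InstanceNormalization())
    block.add(layers.LeakyReLU(alpha))
    return block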
Example #8
def background():
    in_shape = (1,1,100,1)  # unused
    bg = tf.keras.Sequential()
    bg.add(layers.Dense(4*4,use_bias=False,input_shape=(100,)))
    bg.add(layers.BatchNormalization())
    bg.add(layers.LeakyReLU())

    bg.add(layers.Reshape((4,4,1,1)))

    bg.add(layers.Conv3DTranspose(512,(2,4,4),strides=1,use_bias=False,padding='same'))

    # outputs 8x8x1 with 256 channels
    bg.add(layers.Conv3DTranspose(256,4,strides=(2,2,1),use_bias=False,padding='same'))

    # outputs 16x16x1 with 128 channels
    bg.add(layers.Conv3DTranspose(128,4,strides=(2,2,1),use_bias=False,padding='same'))

    # outputs 32x32x1 with 128 channels
    bg.add(layers.Conv3DTranspose(128,4,strides=(2,2,1),use_bias=False,padding='same'))

    # outputs background: 64x64x1 with 3 channels
    bg.add(layers.Conv3DTranspose(3,4,strides=(2,2,1),use_bias=False,padding='same',activation='tanh'))

    return bg
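Note that the strides of (2, 2, 1) only grow the first two spatial axes, so the depth axis stays at 1 throughout. A quick shape-check sketch, assuming tf is imported:

bg = background()
out = bg(tf.random.normal([2, 100]))
print(out.shape)  # (2, 64, 64, 1, 3)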
Example #9
    def make_encoder_model(self):

        model = tf.keras.Sequential()

        model.add(layers.BatchNormalization(input_shape=[512, 512, self.num_channels]))

        model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Reshape((4*4*1024,)))
        model.add(layers.Dense(4096, use_bias=False))

        return model
Example #10
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(
        layers.Conv2D(16, (4, 4),
                      strides=(2, 2),
                      padding='same',
                      input_shape=[256, 256, 3]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(32, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(64, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(256, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(512, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(1024, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model
Example #11
 def discriminator_block(input):
     conv1 = addon_layers.WeightNormalization(layers.Conv1D(
         16, 15, 1, "same"),
                                              data_init=False)(input)
     lrelu1 = layers.LeakyReLU()(conv1)
     conv2 = addon_layers.WeightNormalization(layers.Conv1D(64,
                                                            41,
                                                            4,
                                                            "same",
                                                            groups=4),
                                              data_init=False)(lrelu1)
     lrelu2 = layers.LeakyReLU()(conv2)
     conv3 = addon_layers.WeightNormalization(layers.Conv1D(256,
                                                            41,
                                                            4,
                                                            "same",
                                                            groups=16),
                                              data_init=False)(lrelu2)
     lrelu3 = layers.LeakyReLU()(conv3)
     conv4 = addon_layers.WeightNormalization(layers.Conv1D(1024,
                                                            41,
                                                            4,
                                                            "same",
                                                            groups=64),
                                              data_init=False)(lrelu3)
     lrelu4 = layers.LeakyReLU()(conv4)
     conv5 = addon_layers.WeightNormalization(layers.Conv1D(1024,
                                                            41,
                                                            4,
                                                            "same",
                                                            groups=256),
                                              data_init=False)(lrelu4)
     lrelu5 = layers.LeakyReLU()(conv5)
     conv6 = addon_layers.WeightNormalization(layers.Conv1D(
         1024, 5, 1, "same"),
                                              data_init=False)(lrelu5)
     lrelu6 = layers.LeakyReLU()(conv6)
     conv7 = addon_layers.WeightNormalization(layers.Conv1D(
         1, 3, 1, "same"),
                                              data_init=False)(lrelu6)
     return [lrelu1, lrelu2, lrelu3, lrelu4, lrelu5, lrelu6, conv7]
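discriminator_block returns every intermediate activation along with the final logits map, the MelGAN pattern for a feature-matching loss. A usage sketch, assuming keras and addon_layers (tensorflow_addons.layers) are imported as in the snippet:

inp = keras.Input(shape=(None, 1), name="waveform")
disc = keras.Model(inp, discriminator_block(inp), name="discriminator")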
Example #12
    def make_generator_model(self):

        model = tf.keras.Sequential()

        model.add(layers.Dense(4*4*1024, use_bias=False, input_shape=(4096,), kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())
        model.add(layers.Reshape((4, 4, 1024)))

        model.add(layers.Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(self.num_channels, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.01), name="block7"))

        return model
Example #13
def discriminator_model():

    input1 = layers.Input((720, 1280, 3))
    input2 = layers.Input((720, 1280, 3))

    c11 = layers.Conv2D(128, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        input_shape=[720, 1280, 3])(input1)
    c11 = layers.LeakyReLU()(c11)
    c11 = layers.Dropout(0.15)(c11)

    c12 = layers.Conv2D(128, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        input_shape=[720, 1280, 3])(input2)
    c12 = layers.LeakyReLU()(c12)
    c12 = layers.Dropout(0.15)(c12)

    c21 = layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same')(c11)
    c21 = layers.LeakyReLU()(c21)
    c21 = layers.Dropout(0.15)(c21)

    c22 = layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same')(c12)
    c22 = layers.LeakyReLU()(c22)
    c22 = layers.Dropout(0.15)(c22)

    c3 = layers.concatenate([c21, c22])
    c3 = layers.Conv2D(32, (3, 3), strides=(2, 2), padding='same')(c3)
    c3 = layers.LeakyReLU()(c3)
    c3 = layers.Dropout(0.15)(c3)

    c4 = layers.Conv2D(16, (3, 3), strides=(2, 2), padding='same')(c3)
    c4 = layers.LeakyReLU()(c4)
    c4 = layers.Dropout(0.15)(c4)

    fla = layers.Flatten()(c4)
    out = layers.Dense(1)(fla)

    model = tf.keras.Model(inputs=[input1, input2], outputs=[out])
    return model
Example #14
def make_generator(input_dim=NOISE_DIM):
    model = tf.keras.Sequential()

    model.add(layers.Dense(4096, input_shape=(input_dim,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(4*4*4096))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((4, 4, 4096)))
    assert model.output_shape == (None, 4, 4, 4096), model.output_shape

    model.add(layers.Conv2DTranspose(1024, (5, 5), strides=(2, 2), padding="same"))
    assert model.output_shape == (None, 8, 8, 1024), model.output_shape
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(512, (5, 5), strides=(1, 1), padding="same"))
    assert model.output_shape == (None, 8, 8, 512), model.output_shape
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(256, (5, 5), strides=(1, 1), padding="same"))
    assert model.output_shape == (None, 8, 8, 256), model.output_shape
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(256, (5, 5), strides=(2, 2), padding="same"))
    assert model.output_shape == (None, 16, 16, 256), model.output_shape
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding="same"))
    assert model.output_shape == (None, 32, 32, 1), model.output_shape
    model.add(layers.Activation("tanh"))

    return model
Example #15
    def Encoder(self,):
        X = tf.keras.Input(shape = [self.w, self.h, self.ch_in])

        hidden = layers.Conv2D(self.hidden_size//4, (3, 3), strides=(1, 1), padding='same')(X)
        hidden = layers.LeakyReLU()(hidden)
        hidden = layers.Dropout(0.3)(hidden)

        if self.num_layers >= 1:
            hidden = layers.Conv2D(self.hidden_size//4, (3, 3), strides=(1, 1), padding='same')(hidden)
            hidden = layers.LeakyReLU()(hidden)
            hidden = layers.Dropout(0.3)(hidden)

        hidden = layers.Conv2D(self.hidden_size//2, (5, 5), strides=(2, 2), padding='same')(hidden)
        hidden = layers.LeakyReLU()(hidden)
        hidden = layers.Dropout(0.3)(hidden)

        if self.num_layers >= 2:
            hidden = layers.Conv2D(self.hidden_size//2, (3, 3), strides=(1, 1), padding='same')(hidden)
            hidden = layers.LeakyReLU()(hidden)
            hidden = layers.Dropout(0.3)(hidden)

        hidden = layers.Conv2D(self.hidden_size, (5, 5), strides=(2, 2), padding='same')(hidden)
        hidden = layers.LeakyReLU()(hidden)
        hidden = layers.Dropout(0.3)(hidden)

        if self.num_layers >= 3:
            hidden = layers.Conv2D(self.hidden_size, (3, 3), strides=(1, 1), padding='same')(hidden)
            hidden = layers.LeakyReLU()(hidden)
            hidden = layers.Dropout(0.3)(hidden)

        hidden = layers.Flatten()(hidden)

        mean = layers.Dense(self.latent_size)(hidden)
        variance = layers.Dense(self.latent_size)(hidden)

        return tf.keras.Model(X, [mean, variance])
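The encoder returns two heads, [mean, variance]. In a VAE the second head is usually interpreted as log-variance and sampled with the reparameterization trick; a sketch under that assumption, where encoder is the model returned above:

mean, logvar = encoder(images)
eps = tf.random.normal(tf.shape(mean))
z = mean + tf.exp(0.5 * logvar) * eps  # z ~ N(mean, exp(logvar))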
Example #16
def SVBRDF(num_classes):
    #=============== first layer ==================

    inputs = keras.Input(shape=(256, 256) + (3, ))

    #GF = layers.LeakyReLU()(inputs)
    GF = layers.AveragePooling2D((inputs.shape[1], inputs.shape[1]))(inputs)
    GF = layers.Dense(128)(GF)
    GF = layers.Activation('selu')(GF)

    x = layers.SeparableConv2D(128, 4, 2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    #previous_block_activation = x  # Set aside residual

    #========== define filters for unet ===================

    downfilters = np.array([128, 256, 512, 512, 512, 512, 512])
    Upfilters = np.flip(np.copy(downfilters))
    downfilters = np.delete(downfilters, 0)
    prefilter = 128

    #===================== downsampling =======================

    for filters in downfilters:
        #print(x.shape)
        #print(filters)
        GFdown = layers.AveragePooling2D((x.shape[1], x.shape[1]))(x)
        GFup = layers.Dense(prefilter)(GF)
        GF = layers.Concatenate()([GF, GFdown])
        GF = layers.Dense(filters)(GF)
        GF = layers.Activation('selu')(GF)

        x = layers.Add()([x, GFup])
        x = layers.LeakyReLU()(x)
        x = layers.SeparableConv2D(filters, 4, 2, padding="same")(x)
        x = layers.BatchNormalization()(x)
        prefilter = filters

    #====================== upsampling ============================

    for filters in Upfilters:

        GFdown = layers.AveragePooling2D((x.shape[1], x.shape[1]))(x)
        GFup = layers.Dense(prefilter)(GF)
        GF = layers.Concatenate()([GF, GFdown])
        GF = layers.Dense(filters)(GF)
        GF = layers.Activation('selu')(GF)

        x = layers.Add()([x, GFup])
        x = layers.LeakyReLU()(x)
        x = layers.Conv2DTranspose(filters, 4, 2, padding="same")(x)
        x = layers.BatchNormalization()(x)
        prefilter = filters

    #====================== last connection =====================

    GFup = layers.Dense(prefilter)(x)
    x = layers.Add()([x, GFup])
    outputs = layers.Conv2D(num_classes,
                            3,
                            activation="softmax",
                            padding="same")(x)
    model = keras.Model(inputs, outputs)
    return model
Example #17
                         kernel_regularizer=ker_reg,
                         kernel_initializer=ker_init,
                         use_bias=True,
                         bias_initializer=ker_init)(tf_x)
   else:
     tf_x = layers.Dense(output, name=name+"_dns", 
                         kernel_regularizer=ker_reg,
                         kernel_initializer=ker_init,
                         use_bias=False,
                         )(tf_x)
     
   if activation != 'linear':
     if bn:
       tf_x = layers.BatchNormalization(name=name+"_bn")(tf_x)
     if activation == 'leaky':
       tf_x = layers.LeakyReLU(name=name+"_LeRe")(tf_x)
     else:
       tf_x = layers.Activation(activation, name=name+"_"+activation)(tf_x)
     if noise>0:
       tf_x = layers.GaussianNoise(stddev=noise, name=name+"_gnoise")(tf_x)
   return tf_x
   
   
 def _define_critic_model(self, input_size, action_size, output_size, 
                          output_activation=None, compile_model=True):
   if not isinstance(input_size,tuple):
     input_size = (input_size,)
   if not isinstance(action_size, tuple):
     action_size = (action_size,)
   tf_input_state = layers.Input(input_size, name='c_input_state')
   tf_input_action = layers.Input(action_size, name='c_input_action')
Example #18
def downsamplingBlock(res, sz, filters, hiddenLayers=1):
    for _ in range(hiddenLayers):
        res = convBlock(res, sz, filters)
    res = layers.Convolution2D(filters, (2, 2), strides=2, padding="same")(res)
    return layers.LeakyReLU(alpha=0.2)(res)
Example #19
def convBlock(prev, sz, filters):
    conv_1 = layers.Convolution2D(filters, (sz, sz), padding="same")(prev)
    conv_1 = layers.LeakyReLU(alpha=0.2)(conv_1)
    conv_1 = layers.BatchNormalization()(conv_1)
    # conv_1 = layers.Dropout(0.1)(conv_1)
    return conv_1
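A usage sketch of the two helpers above (each downsamplingBlock applies hiddenLayers conv blocks, then halves the spatial resolution):

inp = layers.Input((64, 64, 3))
x = downsamplingBlock(inp, sz=3, filters=32, hiddenLayers=2)  # -> (32, 32, 32)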
Example #20
- A discriminator network meant to classify 28x28x1 images into two classes ("fake" and "real").
- One optimizer for each.
- A loss function to train the discriminator.


"""

from tensorflow import keras
from tensorflow.keras import layers

# Create the discriminator
discriminator = keras.Sequential(
    [
        keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.GlobalMaxPooling2D(),
        layers.Dense(1),
    ],
    name="discriminator",
)

# Create the generator
latent_dim = 128
generator = keras.Sequential(
    [
        keras.Input(shape=(latent_dim,)),
        # We want to generate 128 coefficients to reshape into a 7x7x128 map
        layers.Dense(7 * 7 * 128),
Example #21
def detect_multitone_model_cnn_build(needPrint=False) -> keras.Model:
    """
    detect_multitone_model_cnn_build
    Purpose: multi-tone onset/duration detection.
    Returns: the model.
    """
    # Build the model
    inputs = keras.Input(
        shape=(initial.config['detect.tone.slice'],
               initial.config['spec.cqt.n_bins'],
               2),
        name='spec_input')

    conv_1 = layers.Conv2D(
        filters=10, kernel_size=(2, 16), strides=(1, 1),
        padding='valid', data_format='channels_last', activation=None,
        kernel_initializer=keras.initializers.HeUniform(),
        bias_initializer=keras.initializers.Zeros(),
        kernel_regularizer=keras.regularizers.L2(l2=5e-5),
        bias_regularizer=None,
        name='conv_1')(inputs)

    bn_1 = layers.BatchNormalization(
        axis=-1, momentum=0.99, epsilon=1e-6,
        center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones',
        moving_mean_initializer='zeros', moving_variance_initializer='ones',
        beta_regularizer=keras.regularizers.l2(l2=1e-5),
        gamma_regularizer=keras.regularizers.l2(l2=1e-5),
        renorm=False, trainable=True, name='bn_1')(conv_1)
    relu_1 = layers.LeakyReLU(alpha=0.1, name='relu_1')(bn_1)
    pool_1 = layers.MaxPool2D(
        pool_size=(2, 2), strides=(1, 2), padding='same', data_format='channels_last', name='pool_1')(relu_1)

    conv_2 = layers.Conv2D(
        filters=20, kernel_size=(3, 12), strides=(1, 1),
        padding='valid', data_format='channels_last', activation=None,
        kernel_initializer=keras.initializers.HeUniform(),
        bias_initializer=keras.initializers.Zeros(),
        kernel_regularizer=keras.regularizers.L2(l2=5e-5),
        bias_regularizer=None,
        name='conv_2')(pool_1)

    bn_2 = layers.BatchNormalization(
        axis=-1, momentum=0.99, epsilon=1e-6,
        center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones',
        moving_mean_initializer='zeros', moving_variance_initializer='ones',
        beta_regularizer=keras.regularizers.l2(l2=1e-5),
        gamma_regularizer=keras.regularizers.l2(l2=1e-5),
        renorm=False, trainable=True, name='bn_2')(conv_2)
    relu_2 = layers.LeakyReLU(alpha=0.1, name='relu_2')(bn_2)
    pool_2 = layers.MaxPool2D(
        pool_size=(2, 2), strides=(1, 2), padding='same', data_format='channels_last', name='pool_2')(relu_2)

    conv_3 = layers.Conv2D(
        filters=40, kernel_size=(3, 9), strides=(1, 1),
        padding='valid', data_format='channels_last', activation=None,
        kernel_initializer=keras.initializers.HeUniform(),
        bias_initializer=keras.initializers.Zeros(),
        kernel_regularizer=keras.regularizers.L2(l2=5e-5),
        bias_regularizer=None,
        name='conv_3')(pool_2)

    bn_3 = layers.BatchNormalization(
        axis=-1, momentum=0.99, epsilon=1e-6,
        center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones',
        moving_mean_initializer='zeros', moving_variance_initializer='ones',
        beta_regularizer=keras.regularizers.l2(l2=1e-5),
        gamma_regularizer=keras.regularizers.l2(l2=1e-5),
        renorm=False, trainable=True, name='bn_3')(conv_3)
    relu_3 = layers.LeakyReLU(alpha=0.1, name='relu_3')(bn_3)
    pool_3 = layers.MaxPool2D(
        pool_size=(2, 2), strides=(2, 2), padding='valid', data_format='channels_last', name='pool_3')(relu_3)

    conv_4 = layers.Conv2D(
        filters=80, kernel_size=(2, 5), strides=(1, 1),
        padding='valid', data_format='channels_last', activation=None,
        kernel_initializer=keras.initializers.HeUniform(),
        bias_initializer=keras.initializers.Zeros(),
        kernel_regularizer=keras.regularizers.L2(l2=5e-5),
        bias_regularizer=None,
        name='conv_4')(pool_3)

    bn_4 = layers.BatchNormalization(
        axis=-1, momentum=0.99, epsilon=1e-6,
        center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones',
        moving_mean_initializer='zeros', moving_variance_initializer='ones',
        beta_regularizer=keras.regularizers.l2(l2=1e-5),
        gamma_regularizer=keras.regularizers.l2(l2=1e-5),
        renorm=False, trainable=True, name='bn_4')(conv_4)
    relu_4 = layers.LeakyReLU(alpha=0.1, name='relu_4')(bn_4)

    reshape_5 = layers.Flatten(name='reshape_5')(relu_4)

    fc_5 = layers.Dense(
        units=256, activation=None, use_bias=True,
        kernel_initializer=keras.initializers.GlorotUniform(),
        bias_initializer=keras.initializers.Zeros(),
        kernel_regularizer=keras.regularizers.L2(l2=4e-3),
        bias_regularizer=None,
        name='fc_5')(reshape_5)
    dropout_5 = layers.Dropout(
        rate=0.5, noise_shape=None, name='dropout_5')(fc_5)
    relu_5 = layers.LeakyReLU(alpha=0.1, name='relu_5')(dropout_5)

    fc_6 = layers.Dense(
        units=88, activation=None, use_bias=True,
        kernel_initializer=keras.initializers.GlorotUniform(),
        bias_initializer=keras.initializers.Constant(dense_pi_init),
        kernel_regularizer=keras.regularizers.L2(l2=4e-3),
        bias_regularizer=None,
        name='fc_6')(relu_5)

    # Create the model
    model = keras.Model(inputs=inputs, outputs=fc_6, name='multitone_model')

    # Display the model
    if needPrint:
        model.summary()
        plot_path = os.path.join(
            initial.config['detect.model.png.path'], 'cnn multitone model.jpg')
        keras.utils.plot_model(model, to_file=plot_path, show_shapes=True)

    return model
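fc_6 above uses a dense_pi_init constant defined elsewhere in the project. For detection-style heads a common choice is the focal-loss prior initialization b = -log((1 - pi) / pi); a sketch under that assumption (the value of pi here is illustrative, not the project's):

import numpy as np

pi = 0.01  # assumed prior probability that a key is active
dense_pi_init = float(-np.log((1.0 - pi) / pi))  # about -4.6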
Example #22
 def act(self, x):
     x = layers.BatchNormalization()(x)
     x = layers.LeakyReLU(0.2)(x)
     return x
Example #23
File: dcgan.py Project: pl561/dcgan
def make_generator_model(noise_dim):
    model = tf.keras.Sequential()
    # model.add(layers.Dense(8*8*512, use_bias=False, input_shape=(noise_dim,)))
    # model.add(layers.BatchNormalization())
    # model.add(layers.LeakyReLU())
    model.add(layers.Input(shape=(noise_dim, )))

    model.add(layers.Reshape((64, 64, 64)))  # requires noise_dim == 64*64*64
    # Note: None is the batch size
    # assert model.output_shape == (None, 32, 32, 128)
    assert model.output_shape == (None, 64, 64, 64)

    # model.add(layers.Conv2DTranspose(
    #     512, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    # assert model.output_shape == (None, 8, 8, 512)
    # model.add(layers.BatchNormalization())
    # model.add(layers.LeakyReLU())

    # model.add(layers.Conv2DTranspose(
    #     256, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    # assert model.output_shape == (None, 16, 16, 256)
    # model.add(layers.BatchNormalization())
    # model.add(layers.LeakyReLU())

    # model.add(layers.Conv2DTranspose(
    #     128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    # assert model.output_shape == (None, 32, 32, 128)
    # model.add(layers.BatchNormalization())
    # model.add(layers.LeakyReLU())

    # model.add(layers.Conv2DTranspose(
    #     64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    # assert model.output_shape == (None, 64, 64, 64)
    # model.add(layers.BatchNormalization())
    # model.add(layers.LeakyReLU())

    model.add(
        layers.Conv2DTranspose(32, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 128, 128, 32)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(
        layers.Conv2DTranspose(16, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 256, 256, 16)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(
        layers.Conv2DTranspose(3, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False,
                               activation='tanh'))
    assert model.output_shape == (None, 512, 512, 3)

    return model
Example #24
    def Decoder(self, ):
        """
        Function:
            Build the Decoder that maps the latent noise back to an image.
            Note: the maximum number of hidden layers added is 2.
        """
        model = tf.keras.Sequential()

        # first several layers for CIFAR dataset
        if self.dataset_name == "CIFAR":
            model.add(
                layers.Dense(self.w // 8 * self.h // 8 * self.hidden_size,
                             input_shape=(self.latent_size, ),
                             use_bias=False))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

            model.add(
                layers.Reshape((self.w // 8, self.h // 8, self.hidden_size)))

            if self.num_layers >= 1:
                model.add(
                    layers.Conv2DTranspose(self.hidden_size,
                                           (self.k_s, self.k_s),
                                           strides=(1, 1),
                                           padding='same',
                                           use_bias=False))
                model.add(layers.BatchNormalization())
                model.add(layers.LeakyReLU(alpha=0.2))

            model.add(
                layers.Conv2DTranspose(self.hidden_size // 2,
                                       (self.k_s, self.k_s),
                                       strides=(2, 2),
                                       padding='same',
                                       use_bias=False))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

        # first several layers for MNIST dataset
        else:
            model.add(
                layers.Dense(self.w // 4 * self.h // 4 * self.hidden_size,
                             input_shape=(self.latent_size, ),
                             use_bias=False))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

            model.add(
                layers.Reshape((self.w // 4, self.h // 4, self.hidden_size)))

            if self.num_layers >= 1:
                model.add(
                    layers.Conv2DTranspose(self.hidden_size // 2,
                                           (self.k_s, self.k_s),
                                           strides=(1, 1),
                                           padding='same',
                                           use_bias=False))
                model.add(layers.BatchNormalization())
                model.add(layers.LeakyReLU(alpha=0.2))

        model.add(
            layers.Conv2DTranspose(self.hidden_size // 2, (self.k_s, self.k_s),
                                   strides=(2, 2),
                                   padding='same',
                                   use_bias=False))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU(alpha=0.2))

        if self.num_layers >= 2:
            model.add(
                layers.Conv2DTranspose(self.hidden_size // 4,
                                       (self.k_s, self.k_s),
                                       strides=(1, 1),
                                       padding='same',
                                       use_bias=False))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

        model.add(
            layers.Conv2DTranspose(self.ch_in, (self.k_s, self.k_s),
                                   strides=(2, 2),
                                   padding='same',
                                   use_bias=False))

        return model
Example #25
    def call(self, inputs, training=None):
        x = self.conv(inputs)
        x = self.bn(x, training=training)
        x = layers.LeakyReLU(alpha=0.3)(x)

        return x
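Instantiating layers.LeakyReLU inside call builds a fresh layer object on every forward pass. That is harmless for a stateless activation, but the idiomatic pattern is to create it once in __init__; a sketch of the same block with an illustrative class name, assuming tf and layers are imported:

class ConvBNAct(tf.keras.layers.Layer):
    def __init__(self, filters, **kwargs):
        super().__init__(**kwargs)
        self.conv = layers.Conv2D(filters, 3, padding='same')
        self.bn = layers.BatchNormalization()
        self.lrelu = layers.LeakyReLU(alpha=0.3)

    def call(self, inputs, training=None):
        x = self.conv(inputs)
        x = self.bn(x, training=training)
        return self.lrelu(x)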
Example #26
 def _generator(self, m):
     layers = []
 
     filters = [64, 128, 256, 512, 512, 512, 512, 512, 512, 512, 512, 512, 256, 128, 64]
     
     input = L.Input(shape=(self.window_size, self.window_size, self.input_channels), name = "gen_input_image")
     
     # layer 0
     convolved = L.Conv2D(filters[0], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(input)
     layers.append(convolved)
         
     # layer 1
     rectified = L.LeakyReLU(alpha = 0.2)(layers[-1])
     convolved = L.Conv2D(filters[1], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(convolved, training = True)
     layers.append(normalized)
         
     # layer 2
     rectified = L.LeakyReLU(alpha = 0.2)(layers[-1])
     convolved = L.Conv2D(filters[2], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(convolved, training = True)
     layers.append(normalized)
         
     # layer 3
     rectified = L.LeakyReLU(alpha = 0.2)(layers[-1])
     convolved = L.Conv2D(filters[3], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(convolved, training = True)
     layers.append(normalized)
         
     # layer 4
     rectified = L.LeakyReLU(alpha = 0.2)(layers[-1])
     convolved = L.Conv2D(filters[4], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(convolved, training = True)
     layers.append(normalized)
         
     # layer 5
     rectified = L.LeakyReLU(alpha = 0.2)(layers[-1])
     convolved = L.Conv2D(filters[5], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(convolved, training = True)
     layers.append(normalized)
     
     # layer 6
     rectified = L.LeakyReLU(alpha = 0.2)(layers[-1])
     convolved = L.Conv2D(filters[6], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(convolved, training = True)
     layers.append(normalized)
     
     # layer 7
     rectified = L.LeakyReLU(alpha = 0.2)(layers[-1])
     convolved = L.Conv2D(filters[7], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(convolved, training = True)
     layers.append(normalized)
     
     # layer 8
     rectified = L.ReLU()(layers[-1])
     deconvolved = L.Conv2DTranspose(filters[8], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(deconvolved, training = True)
     layers.append(normalized)
         
     # layer 9
     concatenated = tf.concat([layers[-1], layers[6]], axis = 3)
     rectified = L.ReLU()(concatenated)
     deconvolved = L.Conv2DTranspose(filters[9], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(deconvolved, training = True)
     layers.append(normalized)
     
     # layer 10
     concatenated = tf.concat([layers[-1], layers[5]], axis = 3)
     rectified = L.ReLU()(concatenated)
     deconvolved = L.Conv2DTranspose(filters[10], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(deconvolved, training = True)
     layers.append(normalized)
         
     # layer 11
     concatenated = tf.concat([layers[-1], layers[4]], axis = 3)
     rectified = L.ReLU()(concatenated)
     deconvolved = L.Conv2DTranspose(filters[11], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(deconvolved, training = True)
     layers.append(normalized)
         
     # layer 12
     concatenated = tf.concat([layers[-1], layers[3]], axis = 3)
     rectified = L.ReLU()(concatenated)
     deconvolved = L.Conv2DTranspose(filters[12], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(deconvolved, training = True)
     layers.append(normalized)
         
     # layer 13
     concatenated = tf.concat([layers[-1], layers[2]], axis = 3)
     rectified = L.ReLU()(concatenated)
     deconvolved = L.Conv2DTranspose(filters[13], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(deconvolved, training = True)
     layers.append(normalized)
         
     # layer 14
     concatenated = tf.concat([layers[-1], layers[1]], axis = 3)
     rectified = L.ReLU()(concatenated)
     deconvolved = L.Conv2DTranspose(filters[14], kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     normalized = L.BatchNormalization()(deconvolved, training = True)
     layers.append(normalized)
         
     # layer 15
     concatenated = tf.concat([layers[-1], layers[0]], axis = 3)
     rectified = L.ReLU()(concatenated)
     deconvolved = L.Conv2DTranspose(self.input_channels, kernel_size = 4, strides = (2, 2), padding = "same", kernel_initializer = tf.initializers.GlorotUniform())(rectified)
     rectified = L.ReLU()(deconvolved)
     output = tf.math.subtract(input, rectified)
     
     return K.Model(inputs = input, outputs = output, name = "generator")
Example #27
def yoloNetModle():
    IMGSZ = cfg.TRAIN.IMGSZ
    GRIDSZ = cfg.TRAIN.GRIDSZ
    scale = IMGSZ // GRIDSZ

    # 3.1
    input_image = layers.Input((IMGSZ, IMGSZ, 3), dtype='float32')

    # unit1
    x = layers.Conv2D(32, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_1',
                      use_bias=False)(input_image)
    x = layers.BatchNormalization(name='norm_1')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # unit2
    x = layers.Conv2D(64, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_2',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_2')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 3
    x = layers.Conv2D(128, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_3',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_3')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 4
    x = layers.Conv2D(64, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      name='conv_4',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_4')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 5
    x = layers.Conv2D(128, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_5',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_5')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 6
    x = layers.Conv2D(256, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_6',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_6')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 7
    x = layers.Conv2D(128, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      name='conv_7',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_7')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 8
    x = layers.Conv2D(256, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_8',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_8')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 9
    x = layers.Conv2D(512, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_9',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_9')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 10
    x = layers.Conv2D(256, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      name='conv_10',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_10')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 11
    x = layers.Conv2D(512, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_11',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_11')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 12
    x = layers.Conv2D(256, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      name='conv_12',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_12')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 13
    x = layers.Conv2D(512, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_13',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_13')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # for skip connection
    skip_x = x  # [b,32,32,512]

    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 14
    x = layers.Conv2D(1024, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_14',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_14')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 15
    x = layers.Conv2D(512, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      name='conv_15',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_15')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 16
    x = layers.Conv2D(1024, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_16',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_16')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 17
    x = layers.Conv2D(512, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      name='conv_17',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_17')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 18
    x = layers.Conv2D(1024, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_18',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_18')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 19
    x = layers.Conv2D(1024, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_19',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_19')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 20
    x = layers.Conv2D(1024, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_20',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_20')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)

    # Layer 21
    skip_x = layers.Conv2D(64, (1, 1),
                           strides=(1, 1),
                           padding='same',
                           name='conv_21',
                           use_bias=False)(skip_x)
    skip_x = layers.BatchNormalization(name='norm_21')(skip_x)
    skip_x = layers.LeakyReLU(alpha=0.1)(skip_x)

    skip_x = SpaceToDepth(block_size=2)(skip_x)

    # concat
    # [b,16,16,1024], [b,16,16,256],=> [b,16,16,1280]
    x = tf.concat([skip_x, x], axis=-1)

    # Layer 22
    x = layers.Conv2D(1024, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      name='conv_22',
                      use_bias=False)(x)
    x = layers.BatchNormalization(name='norm_22')(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Dropout(0.5)(x)  # add dropout
    # [b,16,16,5,7] => [b,16,16,35]

    x = layers.Conv2D(5 * 7, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      name='conv_23')(x)

    output = layers.Reshape((GRIDSZ, GRIDSZ, 5, 7))(x)
    # create model
    model = keras.models.Model(input_image, output)
    # x = tf.random.normal((4, 512, 512, 3))
    # out = model(x)
    # print('out:', out.shape)
    return model
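yoloNetModle assumes a SpaceToDepth layer: the YOLOv2 passthrough that rearranges spatial blocks into channels (with block_size=2, 32x32x64 becomes 16x16x256). A minimal sketch wrapping tf.nn.space_to_depth:

class SpaceToDepth(layers.Layer):
    def __init__(self, block_size, **kwargs):
        super().__init__(**kwargs)
        self.block_size = block_size

    def call(self, inputs):
        # [b, H, W, C] -> [b, H/bs, W/bs, C*bs*bs]
        return tf.nn.space_to_depth(inputs, self.block_size)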
Example #28
 def get_conv_block(f=64, k=3):
     model = keras.Sequential()
     model.add(layers.Conv2D(filters=f, kernel_size=k, padding='same'))
     model.add(layers.BatchNormalization())
     model.add(layers.LeakyReLU())
     return model
Example #29
 def _discriminator(self):
     layers = []
     filters = [32, 64, 64, 128, 128, 256, 256, 256, 8]
     
     input = L.Input(shape=(self.window_size, self.window_size, self.input_channels), name = "dis_input_image")
     
     # layer 1
     convolved = L.Conv2D(filters[0], kernel_size = 3, strides = (1, 1), padding="same")(input)
     rectified = L.LeakyReLU(alpha = 0.2)(convolved)
     layers.append(rectified)
         
     # layer 2
     convolved = L.Conv2D(filters[1], kernel_size = 3, strides = (2, 2), padding="valid")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 3
     convolved = L.Conv2D(filters[2], kernel_size = 3, strides = (1, 1), padding="same")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 4
     convolved = L.Conv2D(filters[3], kernel_size = 3, strides = (2, 2), padding="valid")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 5
     convolved = L.Conv2D(filters[4], kernel_size = 3, strides = (1, 1), padding="same")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 6
     convolved = L.Conv2D(filters[5], kernel_size = 3, strides = (2, 2), padding="valid")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 7
     convolved = L.Conv2D(filters[6], kernel_size = 3, strides = (1, 1), padding="same")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 8
     convolved = L.Conv2D(filters[7], kernel_size = 3, strides = (2, 2), padding="valid")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 9
     convolved = L.Conv2D(filters[8], kernel_size = 3, strides = (2, 2), padding="valid")(layers[-1])
     normalized = L.BatchNormalization()(convolved, training = True)
     rectified = L.LeakyReLU(alpha = 0.2)(normalized)
     layers.append(rectified)
         
     # layer 10
     dense = L.Dense(1)(layers[-1])
     sigmoid = tf.nn.sigmoid(dense)
     layers.append(sigmoid)
     
     output = [layers[0], layers[1], layers[2], layers[3], layers[4], layers[5], layers[6], layers[7], layers[-1]]
         
     return K.Model(inputs = input, outputs = output, name = "discriminator")
Example #30
    def Encoder(self, ):
        """
        Function:
            Encode the image into the latent representation.
            Note: the maximum number of hidden layers added is 2.
        """
        model = tf.keras.Sequential()

        # first several layers for CIFAR dataset
        if self.dataset_name == "CIFAR":
            model.add(
                layers.Conv2D(self.hidden_size // 4, (3, 3),
                              strides=(1, 1),
                              padding='same',
                              input_shape=[self.w, self.h, self.ch_in]))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

            model.add(
                layers.Conv2D(self.hidden_size // 2, (3, 3),
                              strides=(2, 2),
                              padding='same'))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

            if self.num_layers >= 1:
                model.add(
                    layers.Conv2D(self.hidden_size // 2, (3, 3),
                                  strides=(1, 1),
                                  padding='same'))
                model.add(layers.BatchNormalization())
                model.add(layers.LeakyReLU(alpha=0.2))

            model.add(
                layers.Conv2D(self.hidden_size // 2, (3, 3),
                              strides=(2, 2),
                              padding='same'))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

        # first several layers for MNIST dataset
        else:
            model.add(
                layers.Conv2D(self.hidden_size // 2, (self.k_s, self.k_s),
                              strides=(2, 2),
                              padding='same',
                              input_shape=[self.w, self.h, self.ch_in]))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

            if self.num_layers >= 1:
                model.add(
                    layers.Conv2D(self.hidden_size // 2, (self.k_s, self.k_s),
                                  strides=(1, 1),
                                  padding='same'))
                model.add(layers.BatchNormalization())
                model.add(layers.LeakyReLU(alpha=0.2))

        model.add(
            layers.Conv2D(self.hidden_size, (3, 3),
                          strides=(2, 2),
                          padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU(alpha=0.2))

        if self.num_layers >= 2:
            model.add(
                layers.Conv2D(self.hidden_size, (3, 3),
                              strides=(1, 1),
                              padding='same'))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(alpha=0.2))

        model.add(layers.Flatten())
        model.add(layers.Dropout(0.4))
        model.add(layers.Dense(2 * self.latent_size))

        return model
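The final Dense emits 2 * latent_size units, presumably the mean and log-variance concatenated. A hedged split, where encoder_model is the Sequential returned by Encoder and x a batch of images:

mean, logvar = tf.split(encoder_model(x), num_or_size_splits=2, axis=-1)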