Exemple #1
0
def make_generator_model():
    """Build the DCGAN generator: 100-dim noise -> (28, 28, 1) tanh image."""
    generator = tf.keras.Sequential()

    # Project the latent vector onto a 7x7x256 feature volume.
    generator.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100, )))
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

    generator.add(layers.Reshape((7, 7, 256)))
    assert generator.output_shape == (None, 7, 7, 256)  # None is the batch size

    # 7x7x256 -> 7x7x128 (stride 1 keeps the spatial size).
    generator.add(layers.Conv2DTranspose(
        128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert generator.output_shape == (None, 7, 7, 128)
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

    # 7x7x128 -> 14x14x64.
    generator.add(layers.Conv2DTranspose(
        64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert generator.output_shape == (None, 14, 14, 64)
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

    # 14x14x64 -> 28x28x1, tanh output in [-1, 1].
    generator.add(layers.Conv2DTranspose(
        1, (5, 5), strides=(2, 2), padding='same', use_bias=False,
        activation='tanh'))
    assert generator.output_shape == (None, 28, 28, 1)

    return generator
    def create_model(self,
                     activation: str = "relu",
                     L2_lambda: float = 0.02,
                     conv_sizes: "Optional[List[int]]" = None,
                     emb_height: int = 100):
        """Build the embedding + convolutional Keras model.

        Args:
            activation: activation function name forwarded to create_after_emb.
            L2_lambda: L2 regularization coefficient forwarded to create_after_emb.
            conv_sizes: convolution sizes forwarded to create_after_emb;
                defaults to [2, 4, 16] when None.
            emb_height: dimensionality of the token embedding.

        Returns:
            The (uncompiled) keras Model from the (self.crop, 1) input to the
            output of create_after_emb.
        """
        # A literal list default would be a single object shared across calls
        # (mutable-default pitfall); build a fresh list per invocation instead.
        if conv_sizes is None:
            conv_sizes = [2, 4, 16]

        conv_channels = 2
        input_layer = layers.Input(shape=(self.crop, 1))

        # Token embedding; mask_zero=True treats id 0 as padding.
        embeddings = layers.Embedding(
            self.max_val,
            emb_height,
            mask_zero=True,
            input_length=self.input_size)(input_layer)

        # Add a singleton channel axis for the downstream conv layers.
        reshape1 = layers.Reshape((self.crop, emb_height, 1))(embeddings)

        dense = self.create_after_emb(reshape1, conv_channels, emb_height,
                                      activation, L2_lambda, conv_sizes)
        result = models.Model(input_layer, dense)

        print(result.summary())
        # keras.utils.plot_model(result, "{}.png".format(self.name), show_shapes=True)
        return result
Exemple #3
0
 def __init__(self, image_shape):
     """Conditional discriminator: scores an image given its class label.

     Args:
         image_shape: (height, width) — only indices 0 and 1 are read here.
     """
     super(Discriminator, self).__init__()
     self.image_shape = image_shape
     # Map a one-hot label of size NUM_CLASSES to an H*W plane so it can be
     # concatenated to the image as one extra channel.
     self.label_embedding = tf.keras.Sequential([
         layers.Dense(image_shape[0] * image_shape[1],
                      input_shape=(NUM_CLASSES, )),
         layers.Reshape((image_shape[0], image_shape[1], 1))
     ])
     self.conv1 = tf.keras.Sequential([
         # IMAGE_SHAPE[2] + 1 is image channels + label condition
         layers.Conv2D(64, (5, 5),
                       strides=(2, 2),
                       padding='same',
                       input_shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1],
                                    IMAGE_SHAPE[2] + 1)),
         layers.LeakyReLU(),
         layers.Dropout(0.3)
     ])
     self.conv2 = tf.keras.Sequential([
         layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
         layers.LeakyReLU(),
         layers.Dropout(0.3)
     ])
     # Final score: flatten and project to a single logit.
     self.out = tf.keras.Sequential([layers.Flatten(), layers.Dense(1)])
Exemple #4
0
def Spacer(weights=None, config=None):
    '''
    Build the Spacer model: per-position sigmoid predictions over an input
    sequence of config.MAX_TEXT_LEN tokens.

    # Arguments
        weights: unused in this function body.
        config: configuration object providing MAX_TEXT_LEN, NUM_WORDS,
            EMBEDDING_DIM, FILTER_NUMS and FILTER_SIZES.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: In case of invalid
            filter configurations.

    '''
    input_shape = _obtain_input_shape(config.MAX_TEXT_LEN)

    inputs = layers.Input(shape=input_shape)
    x = layers.Embedding(config.NUM_WORDS,
                         config.EMBEDDING_DIM,
                         input_length=config.MAX_TEXT_LEN)(inputs)

    # Parallel conv blocks over the embedded sequence, then concatenated.
    conv_blocks = _conv_block(x, config.FILTER_NUMS, config.FILTER_SIZES)

    x = layers.Concatenate()(conv_blocks)
    # Two stacked LSTMs keep the full sequence (return_sequences=True).
    x = layers.LSTM(100, dropout=0.3, return_sequences=True)(x)
    x = layers.LSTM(50, dropout=0.1, return_sequences=True)(x)

    x = layers.TimeDistributed(layers.Dense(300, activation='relu'))(x)
    x = layers.Dropout(0.3)(x)
    x = layers.TimeDistributed(layers.Dense(150, activation='relu'))(x)
    # Per-timestep sigmoid, reshaped back to the input layout.
    x = layers.TimeDistributed(layers.Dense(1, activation='sigmoid'))(x)
    x = layers.Reshape(input_shape)(x)

    model = models.Model(inputs, x)

    return model
Exemple #5
0
def make_generator_model(noise_dim):
    """SAGAN-style generator: (1, 1, noise_dim) input -> (128, 128, 3) tanh image."""
    latent = layers.Input(shape=(1, 1, noise_dim),
                          name='gen_input')  # (None, 1, 1, NOISE_DIM)

    # Spectral-normalized FC projection, reshaped to a 4x4 feature map.
    net = SNDense(4 * 4 * (G_CONV_DIM * 16), name='gen_first_fc')(latent)
    net = layers.Reshape((4, 4, G_CONV_DIM * 16))(net)  # (None, 4, 4, G_CONV_DIM*16)

    # Upsampling residual blocks: 4 -> 8 -> 16 -> 32.
    net = UpResBlock(G_CONV_DIM * 16, name='gen_block1')(net)  # (None, 8, 8, G_CONV_DIM*16)
    net = UpResBlock(G_CONV_DIM * 8, name='gen_block2')(net)   # (None, 16, 16, G_CONV_DIM*8)
    net = UpResBlock(G_CONV_DIM * 4, name='gen_block3')(net)   # (None, 32, 32, G_CONV_DIM*4)

    # Self-attention at 32x32 resolution.
    net = SelfAttention(name='gen_attention')(net)  # (None, 32, 32, G_CONV_DIM*4)

    # Continue upsampling: 32 -> 64 -> 128.
    net = UpResBlock(G_CONV_DIM * 2, name='gen_block4')(net)  # (None, 64, 64, G_CONV_DIM*2)
    net = UpResBlock(G_CONV_DIM, name='gen_block5')(net)      # (None, 128, 128, G_CONV_DIM)

    net = BatchNorm(name='gen_bn')(net)
    net = layers.ReLU()(net)

    # Spectral-normalized 3x3 conv down to RGB, then tanh to [-1, 1].
    net = SNConv2D(3, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='gen_last_conv')(net)  # (None, 128, 128, 3)
    rgb = layers.Activation('tanh')(net)

    return tf.keras.Model(latent, rgb)
Exemple #6
0
    def build_graph(self, img_feat_input, q_input):
        """Wire the VQA graph: question-guided top-down attention over image features.

        Args:
            img_feat_input: image feature map; trailing dims must equal
                self.options['img_feat_shape'].
            q_input: integer question tokens of length self.options['question_len'].

        Returns:
            A shallow copy of self.outputs with 'skills' set to the prediction tensor.
        """
        assert q_input.get_shape().as_list(
        )[1] == self.options['question_len'], "Wrong question length!"
        assert img_feat_input.get_shape().as_list(
        )[1:] == self.options['img_feat_shape'], "Wrong feature shape!"

        # Encode the question: masked embedding followed by a GRU.
        embedded_q = self.q_embed(q_input)
        q_mask = self.q_embed.compute_mask(q_input)
        encoded_q = self.q_gru(embedded_q, mask=q_mask)
        # Broadcast the question encoding over every spatial cell of the feature map.
        _encoded_q = layers.Reshape(
            (1, 1, self.options['q_encoded_size']))(encoded_q)
        encoded_q_tile = tf.tile(_encoded_q, [
            1, self.options['img_feat_shape'][0],
            self.options['img_feat_shape'][1], 1
        ])

        concat_VQ = layers.Concatenate(axis=-1)(
            [img_feat_input, encoded_q_tile])
        # Gated unit: tanh branch elementwise-multiplied by a sigmoid gate.
        topdown_tanh_out = self.topdown_tanh(concat_VQ)
        topdown_sig_out = self.topdown_sig(concat_VQ)
        topdown_gated_out = self.topdown_multiply(
            [topdown_tanh_out, topdown_sig_out])
        topdown_out = self.topdown_conv(topdown_gated_out)
        # Spatial softmax over (H, W), written out explicitly as exp / sum(exp).
        prob_attention = tf.exp(topdown_out) / tf.reduce_sum(
            tf.exp(topdown_out), axis=(1, 2), keepdims=True)
        # Attention-weighted pooling of the image features.
        attention_feat = tf.reduce_sum(prob_attention * img_feat_input,
                                       axis=(1, 2))

        # Fuse gated question and attended image features, then predict.
        gated_encoded_q = self.q_gated_tanh(encoded_q)
        gated_atten_feat = self.img_gated_tanh(attention_feat)
        vq_feat = self.VQ_joint([gated_encoded_q, gated_atten_feat])

        pred_skill = self.ans_dense(self.ans_gated_tanh(vq_feat))
        self.outputs['skills'] = pred_skill

        return self.outputs.copy()
Exemple #7
0
def load_vae_dna_model(latent_dim, rc_loss_scale, vae_lr, dummy):
    """Build a dense VAE over 479-dim vectors; returns (encoder, decoder, vae).

    Args:
        latent_dim: size of each latent half (mean and log-variance).
        rc_loss_scale: multiplier applied to the MSE reconstruction loss.
        vae_lr: Adam learning rate used when compiling the VAE.
        dummy: unused in this function body; kept for call-site compatibility.
    """
    # Build encoder
    encoder_inputs = keras.Input(shape=(479,))
    x = layers.Dense(512, activation="relu")(encoder_inputs)
    x = layers.Dense(256, activation="relu")(x)
    x = layers.Dense(256, activation="relu")(x)
    x = layers.Dense(256, activation="relu")(x)
    z_mean = layers.Dense(latent_dim, name="z_mean")(x)
    z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
    # Encoder emits [mean, log_var] concatenated into one 2*latent_dim vector.
    z_mean_var = layers.Concatenate()([z_mean, z_log_var])
    # NOTE(review): L2-normalizing the concatenated [mean, log_var] vector is
    # unusual for a VAE; the KL term below still uses the raw z_mean/z_log_var
    # tensors — confirm this is intentional.
    z_mean_var = Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(z_mean_var)
    encoder = keras.Model(encoder_inputs, z_mean_var, name="encoder")

    # Build decoder
    latent_inputs = keras.Input(shape=(latent_dim*2,))
    # Split the concatenated latent back into mean / log-variance halves.
    x_mean = Lambda(lambda x: x[:, 0:latent_dim])(latent_inputs)
    x_log_var = Lambda(lambda x: x[:, latent_dim:])(latent_inputs)
    # Reparameterization trick via the external `sampling` helper.
    x = Lambda(sampling, output_shape=(latent_dim,), name='z')([x_mean, x_log_var])        
    x = layers.Dense(256, activation="relu")(x)
    x = layers.Dense(256, activation="relu")(x)
    x = layers.Dense(256, activation="relu")(x)
    x = layers.Dense(512, activation="relu")(x)
    x = layers.Dense(479, activation="linear")(x)
    decoder_outputs = layers.Reshape((479,))(x)
    decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
    outputs = decoder(encoder(encoder_inputs))
    vae = keras.Model(encoder_inputs, outputs, name='vae_mlp')
    # Loss = scaled MSE reconstruction + KL divergence, added via add_loss so
    # compile() needs no explicit loss argument.
    reconstruction_loss = mse(encoder_inputs, outputs)
    reconstruction_loss *= rc_loss_scale
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer=Adam(learning_rate=vae_lr, clipnorm=1.0))
    return encoder, decoder, vae
def Decoder(x,feedback_bits):
    """CSI-feedback decoder: dequantize the bit tensor and reconstruct a
    (16, 32, 2) sigmoid output via dense layers plus conv residual blocks.
    """
    B=4
    decoder_input = DeuantizationLayer(B)(x)
    # x = tf.keras.layers.Reshape((-1, int(feedback_bits/B)))(decoder_input)  # cannot be used on tf 2.3, it raises an error
    x = tf.reshape(decoder_input, [-1, int(feedback_bits/B)])
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dense(64, activation='relu')(x)
    x = layers.Dense(1024*16, activation='linear')(x)         # whether this activation should change depends on the use case
    x_ini = layers.Reshape((16, 32, 2*16))(x)

    x = layers.Conv2D(32, 7, padding='SAME', activation="relu")(x_ini)
    x = layers.Conv2D(16, 5, padding='SAME', activation="relu")(x)
    x = layers.Conv2D(64, 7, padding='SAME', activation="relu")(x)
    # Five residual refinement blocks; each adds its output back onto x_ini.
    for i in range(5):
        x = layers.Conv2D(256, 7, padding = 'SAME',activation="relu")(x)
        x = layers.Conv2D(128,5, padding = 'SAME',activation="relu")(x)
        x = layers.Conv2D(64, 3, padding = 'SAME',activation="relu")(x)
        x = layers.Conv2D(32, 3, padding='SAME', activation="relu")(x)
        x_ini = keras.layers.Add()([x_ini, x])


    # Final projection to 2 channels with sigmoid output.
    decoder_output = layers.Conv2D(2, 3, padding = 'SAME',activation="sigmoid")(x_ini)

    return decoder_output
Exemple #9
0
    def make_generator_model(self):
        """Generator: 4096-dim latent -> (512, 512, self.num_channels) image.

        Starts from a 4x4x1024 volume and doubles the spatial resolution
        seven times (4 * 2**7 = 512).
        """
        init = tf.random_normal_initializer(mean=0, stddev=0.01)
        model = tf.keras.Sequential()

        # Project and reshape the latent vector to a 4x4x1024 volume.
        model.add(layers.Dense(4 * 4 * 1024, use_bias=False,
                               input_shape=(4096,), kernel_initializer=init))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())
        model.add(layers.Reshape((4, 4, 1024)))

        # Six identical upsampling stages, each doubling H and W.
        for filters in (512, 256, 128, 128, 64, 64):
            model.add(layers.Conv2DTranspose(
                filters, (3, 3), strides=(2, 2), padding='same',
                use_bias=False, kernel_initializer=init))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU())

        # Final upsample to the output channel count (no BN / activation).
        model.add(layers.Conv2DTranspose(
            self.num_channels, (5, 5), strides=(2, 2), padding='same',
            use_bias=False, kernel_initializer=init, name="block7"))

        return model
Exemple #10
0
    def model(self):
        """Fully-connected generator: (num_class + latent_dim,) -> (28, 28, 1) tanh image."""
        model = tf.keras.Sequential(name='Generator')

        # Layer 1 declares the input shape; later layers infer theirs.
        model.add(layers.Dense(
            256, use_bias=True,
            input_shape=(self.param.num_class + self.param.latent_dim, ),
            name='l1_dense'))
        model.add(layers.BatchNormalization(name='l1_bn'))
        model.add(layers.LeakyReLU(name='l1_leaky'))
        assert model.output_shape == (None, 256)

        # Layers 2-4: progressively wider dense + BN + LeakyReLU stacks.
        for units, tag in ((512, 'l2'), (1024, 'l3'), (2048, 'l4')):
            model.add(layers.Dense(units, use_bias=True, name=f'{tag}_dense'))
            model.add(layers.BatchNormalization(name=f'{tag}_bn'))
            model.add(layers.LeakyReLU(name=f'{tag}_leaky'))
            assert model.output_shape == (None, units)

        # Layer 5: 784 = 28*28 pixels in [-1, 1], reshaped into an image.
        model.add(layers.Dense(784, use_bias=True, activation='tanh', name='l5_dense'))
        model.add(layers.Reshape((28, 28, 1), name='l5_reshape'))
        assert model.output_shape == (None, 28, 28, 1)

        model.summary(line_length=self.param.model_display_len)

        return model
Exemple #11
0
def build_generator(latent_dim):
    """DCGAN generator: latent_dim noise -> (Img_H, Img_W, 1) sigmoid image."""
    net = tf.keras.Sequential()

    # Project the latent vector to a 7x7x256 volume.
    net.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(latent_dim,)))
    net.add(layers.BatchNormalization())
    net.add(layers.LeakyReLU())

    net.add(layers.Reshape((7, 7, 256)))
    assert net.output_shape == (None, 7, 7, 256)  # Note: None is the batch size

    # Stride 1 keeps 7x7 but halves the channels.
    net.add(layers.Conv2DTranspose(128, (5, 5), strides=1, padding='same', use_bias=False))
    assert net.output_shape == (None, 7, 7, 128)
    net.add(layers.BatchNormalization())
    net.add(layers.LeakyReLU())

    # Upsample 7x7 -> 14x14.
    net.add(layers.Conv2DTranspose(64, (5, 5), strides=2, padding='same', use_bias=False))
    assert net.output_shape == (None, 14, 14, 64)
    net.add(layers.BatchNormalization())
    net.add(layers.LeakyReLU())

    # Final upsample with sigmoid output in [0, 1]; Img_H / Img_W are module globals.
    net.add(layers.Conv2DTranspose(1, (5, 5), strides=2, padding='same',
                                   use_bias=False, activation='sigmoid'))
    assert net.output_shape == (None, Img_H, Img_W, 1)

    return net
Exemple #12
0
def build_generator(latent_dim, in_dim=4):
    """Small conv generator: latent vector -> (in_dim, in_dim, 3) image tensor."""
    print('Building the Generator')

    # Base model latent input.
    latent_input = keras.Input(shape=(latent_dim,))

    # Linear scale-up to in_dim x in_dim activation maps with 128 channels.
    net = layers.Dense(128 * in_dim * in_dim, kernel_initializer='he_normal')(latent_input)
    net = layers.Reshape((in_dim, in_dim, 128))(net)

    # Two identical 3x3 conv + BN + LeakyReLU blocks.
    for _ in range(2):
        net = layers.Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal')(net)
        net = layers.BatchNormalization()(net)
        net = layers.LeakyReLU(alpha=0.2)(net)

    # 1x1 conv output block down to 3 channels.
    out_image = layers.Conv2D(3, (1, 1), padding='same', kernel_initializer='he_normal')(net)

    return keras.models.Model(latent_input, out_image)
Exemple #13
0
    def __init__(self, latent_dim) -> None:
        """Convolutional VAE networks for (28, 28, 1) inputs.

        Args:
            latent_dim: latent code size; the inference net emits
                latent_dim + latent_dim units (presumably mean and
                log-variance halves — confirm against the sampling code).
        """
        super().__init__()

        self.latent_dim = latent_dim
        # Encoder: two stride-2 convs (28 -> 14 -> 7), then a linear dense head.
        self.inference_net = tf.keras.Sequential([
            layers.InputLayer(input_shape=[28, 28, 1]),
            layers.Conv2D(filters=32, kernel_size=3, strides=2, padding="same", activation="relu"),
            layers.Conv2D(filters=64, kernel_size=3, strides=2, padding="same", activation="relu"),

            layers.Flatten(),
            layers.Dense(latent_dim + latent_dim)
        ])

        # Decoder: dense projection to 7x7x32, then transposed convs back to 28x28.
        self.generative_net = tf.keras.Sequential([
            layers.InputLayer(input_shape=[latent_dim]),

            layers.Dense(7*7*32, activation="relu"),
            layers.Reshape([7, 7, 32]),

            layers.Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding="same", activation="relu"),
            layers.Conv2DTranspose(filters=32, kernel_size=3, strides=2, padding="same", activation="relu"),

            # NOTE(review): ReLU on the output layer clamps negatives to 0 —
            # confirm this is the intended output activation.
            layers.Conv2DTranspose(filters=1, kernel_size=3, strides=1, padding="same", activation="relu"),
        ])
Exemple #14
0
def decoder_112x112(code_size):
    """Transposed-conv decoder: code vector -> 112x112x1 sigmoid image."""
    decoder = keras.models.Sequential()
    decoder.add(L.InputLayer((code_size, )))

    # Seed a 3x3x64 volume from the code vector.
    decoder.add(L.Dense(3 * 3 * 64, activation='elu'))
    decoder.add(L.Reshape([3, 3, 64]))

    # Five stride-2 upsampling stages: the first uses 'valid' padding
    # (3 -> 7), the last produces the single-channel sigmoid output.
    stages = [
        (32, 'elu', 'valid'),
        (16, 'elu', 'same'),
        (8, 'elu', 'same'),
        (4, 'elu', 'same'),
        (1, 'sigmoid', 'same'),
    ]
    for filters, act, pad in stages:
        decoder.add(L.Conv2DTranspose(filters=filters,
                                      kernel_size=(3, 3),
                                      strides=2,
                                      activation=act,
                                      padding=pad))
    return decoder
Exemple #15
0
def generator_model():
    """Conditional generator: (100-dim noise, 10-dim class vector) -> (28, 28, 1) tanh image."""
    # NOTE(review): shape=(100) is an int, not a 1-tuple; Keras accepts it,
    # but (100,) would be clearer.
    entree_bruit =layers.Input(shape=(100), dtype='float32')
    entree_classe=layers.Input(shape=(10),  dtype='float32')

    # Concatenate noise and class-condition vectors into one input.
    result=layers.concatenate([entree_bruit, entree_classe])

    # Project to a 7x7x256 volume.
    result=layers.Dense(7*7*256, use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    result=layers.Reshape((7, 7, 256))(result)

    # Stride 1: keep 7x7, reduce channels.
    result=layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    # Upsample 7x7 -> 14x14.
    result=layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    # Upsample 14x14 -> 28x28 with tanh output in [-1, 1].
    sortie=layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(result)

    model=models.Model(inputs=[entree_bruit, entree_classe], outputs=sortie)
    return model
Exemple #16
0
    def _build_subvertencoder(self):
        """build subvert encoder of NAML news encoder.

        Return:
            obj: the subvert encoder of NAML.
        """
        hparams = self.hparams
        # A single integer subcategory (subvert) id per news item.
        input_subvert = keras.Input(shape=(1,), dtype="int32")

        subvert_embedding = layers.Embedding(
            hparams.subvert_num, hparams.subvert_emb_dim, trainable=True
        )

        subvert_emb = subvert_embedding(input_subvert)
        # Project the embedding to filter_num units (presumably to match the
        # width of the other NAML view encoders — verify against their builders).
        pred_subvert = layers.Dense(
            hparams.filter_num,
            activation=hparams.dense_activation,
            bias_initializer=keras.initializers.Zeros(),
            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
        )(subvert_emb)
        # Force the output shape to (batch, 1, filter_num).
        pred_subvert = layers.Reshape((1, hparams.filter_num))(pred_subvert)

        model = keras.Model(input_subvert, pred_subvert, name="subvert_encoder")
        return model
Exemple #17
0
def build_regress_head(width, depth, num_anchors=9):
    """Box-regression head: `depth` 3x3 relu convs, then a (num_anchors*4)-channel
    projection reshaped to (batch, num_anchors_this_feature_map, 4).
    """
    shared_conv_kwargs = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None),
        'bias_initializer': 'zeros'
    }

    inputs = layers.Input(shape=(None, None, width))
    x = inputs

    # Shared tower: `depth` convs that keep the channel width constant.
    for _ in range(depth):
        x = layers.Conv2D(filters=width, activation='relu', **shared_conv_kwargs)(x)

    # Four regression targets per anchor, flattened across spatial positions.
    x = layers.Conv2D(num_anchors * 4, **shared_conv_kwargs)(x)
    outputs = layers.Reshape((-1, 4))(x)

    return models.Model(inputs=inputs, outputs=outputs, name='box_head')
Exemple #18
0
def make_generator_model():
    """DCGAN generator: 100-dim noise -> (28, 28, 1) image in [-1, 1] (tanh)."""
    model = tf.keras.Sequential() # Create a linear stack of layers style model
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,))) # Start with a Dense (classic) NN layer, with seed as input
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256) # None is the batch size, make sure the model so far is outputting the right shape

    # Stride 1: keeps 7x7, halves channels.
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Upsample 7x7 -> 14x14.
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Upsample 14x14 -> 28x28 with tanh output.
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    # model has been upsampled to the right shape, return it.
    return model
Exemple #19
0
def model(nbr_classes, nbr_attributs, nbr_boxes, nbr_cellule, nbr_cc=32):
    """YOLO-style detector: image -> per-cell, per-box predictions.

    Args:
        nbr_classes: number of object classes per box.
        nbr_attributs: number of extra attributes per box.
        nbr_boxes: boxes predicted per grid cell.
        nbr_cellule: grid size (cells per side).
        nbr_cc: base channel count, doubled at each downsampling stage.

    Returns:
        Keras Model mapping (cfg.image_size, cfg.image_size, 3) to
        (nbr_cellule, nbr_cellule, nbr_boxes, 5 + nbr_classes + nbr_attributs).
    """
    entree = layers.Input(shape=(cfg.image_size, cfg.image_size, 3),
                          dtype='float32')

    # Four stages of repeated conv blocks, each ending in a reducing resnet
    # block; dropout increases with depth.
    result = block_repeat(entree, 1 * nbr_cc, [3, 3], dropout=0.3)
    result = block_resnet(result, 1 * nbr_cc, 3, dropout=0.3, reduce=True)

    result = block_repeat(result, 2 * nbr_cc, [3, 3, 3], dropout=0.4)
    result = block_resnet(result, 2 * nbr_cc, 3, dropout=0.4, reduce=True)

    result = block_repeat(result, 4 * nbr_cc, [3, 3, 3, 3, 3], dropout=0.5)
    result = block_resnet(result, 4 * nbr_cc, 3, dropout=0.5, reduce=True)

    result = block_repeat(result, 8 * nbr_cc, [3, 3, 3], dropout=0.5)
    result = block_resnet(result, 8 * nbr_cc, 3, dropout=0.5, reduce=True)

    # 1x1 conv head producing all box predictions, then split per cell/box.
    result = layers.Conv2D(nbr_boxes * (5 + nbr_classes + nbr_attributs),
                           1,
                           padding='SAME')(result)
    sortie = layers.Reshape((nbr_cellule, nbr_cellule, nbr_boxes,
                             5 + nbr_classes + nbr_attributs))(result)

    model = models.Model(inputs=entree, outputs=sortie)
    return model
Exemple #20
0
 def __init__(self, config):
     """Generator: config.latent_dim vector -> (32, 32, 1) sigmoid image.

     Args:
         config: object providing latent_dim (size of the input noise vector).
     """
     super(Generator, self).__init__()
     self.config = config
     # Dense stack up to an 8x8x128 volume, then two stride-2 transposed
     # convs (8 -> 16 -> 32) ending in a single sigmoid channel.
     self.gen = Sequential([
         layers.InputLayer(input_shape=(self.config.latent_dim, )),
         layers.Dense(1024),
         layers.BatchNormalization(),
         layers.ReLU(),
         layers.Dense(8 * 8 * 128),
         layers.BatchNormalization(),
         layers.ReLU(),
         layers.Reshape((8, 8, 128)),
         layers.Conv2DTranspose(filters=64,
                                kernel_size=4,
                                strides=2,
                                padding='same'),
         layers.BatchNormalization(),
         layers.ReLU(),
         layers.Conv2DTranspose(filters=1,
                                kernel_size=4,
                                strides=2,
                                padding='same',
                                activation='sigmoid'),
     ])
Exemple #21
0
def se_block(inputs: tf.Tensor, filters: int, se_ratio: float,
             name: str) -> tf.Tensor:
    """Squeeze-and-excite: re-scale `inputs` channel-wise with a learned gate."""
    prefix = f'{name}/squeeze_excite'

    # Squeeze: global spatial average -> one value per channel.
    pooled = layers.GlobalAveragePooling2D(name=f'{prefix}/AvgPool')(inputs)

    # Restore a broadcastable 4-D shape; channel axis depends on data format.
    if CHANNEL_AXIS == 1:
        gate = layers.Reshape((filters, 1, 1))(pooled)
    else:
        gate = layers.Reshape((1, 1, filters))(pooled)

    # Excite: bottleneck 1x1 conv, ReLU, expansion 1x1 conv, hard sigmoid.
    bottleneck_filters = make_divisible(filters * se_ratio)
    gate = layers.Conv2D(bottleneck_filters,
                         1,
                         padding='same',
                         name=f'{prefix}/Conv')(gate)
    gate = layers.ReLU(name=f'{prefix}/Relu')(gate)
    gate = layers.Conv2D(filters,
                         1,
                         padding='same',
                         name=f'{prefix}/Conv_1')(gate)
    gate = hard_sigmoid(gate)

    # Scale the input feature map by the learned per-channel gate.
    return layers.Multiply(name=f'{prefix}/Mul')([inputs, gate])
Exemple #22
0
def create_voxnet_model_big(input_shape, output_size):
    """
    Creates a big VoxNet.

    See: http://dimatura.net/publications/3dcnn_lz_maturana_scherer_icra15.pdf

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """
    # Trainable params: 7,101,442
    voxnet = models.Sequential(name="C7-F64-P4-D512")
    # Append a singleton channel axis so Conv3D sees (D, H, W, 1).
    voxnet.add(layers.Reshape(target_shape=input_shape + (1,), input_shape=input_shape))
    voxnet.add(layers.Conv3D(64, (7, 7, 7), activation="relu"))
    voxnet.add(layers.MaxPooling3D((4, 4, 4)))
    voxnet.add(layers.Flatten())
    voxnet.add(layers.Dense(512, activation="relu"))
    voxnet.add(layers.Dense(output_size))  # raw logits, no output activation
    return voxnet
Exemple #23
0
def LSTM(N_CLASSES=10, SR=16000, DT=1.0):
    """Mel-spectrogram + bidirectional-LSTM audio classifier.

    Args:
        N_CLASSES: number of softmax output classes.
        SR: audio sample rate in Hz.
        DT: clip duration in seconds; input length is int(SR * DT) samples.

    Returns:
        A compiled Keras model (adam optimizer, categorical crossentropy).
    """
    input_shape = (int(SR * DT), 1)
    # Front-end: trainable mel-spectrogram extraction layer.
    i = get_melspectrogram_layer(input_shape=input_shape,
                                 n_mels=128,
                                 pad_end=True,
                                 n_fft=512,
                                 win_length=400,
                                 hop_length=160,
                                 sample_rate=SR,
                                 return_decibel=True,
                                 input_data_format='channels_last',
                                 output_data_format='channels_last',
                                 name='2d_convolution')
    # NOTE(review): despite the name 'batch_norm', this is a LayerNormalization.
    x = LayerNormalization(axis=2, name='batch_norm')(i.output)
    # Flatten each time step's mel bins into a single feature vector.
    x = TimeDistributed(layers.Reshape((-1, )), name='reshape')(x)
    s = TimeDistributed(layers.Dense(64, activation='tanh'),
                        name='td_dense_tanh')(x)
    x = layers.Bidirectional(layers.LSTM(32, return_sequences=True),
                             name='bidirectional_lstm')(s)
    # Skip connection: concatenate pre-LSTM features with LSTM output.
    x = layers.concatenate([s, x], axis=2, name='skip_connection')
    x = layers.Dense(64, activation='relu', name='dense_1_relu')(x)
    x = layers.MaxPooling1D(name='max_pool_1d')(x)
    x = layers.Dense(32, activation='relu', name='dense_2_relu')(x)
    x = layers.Flatten(name='flatten')(x)
    x = layers.Dropout(rate=0.2, name='dropout')(x)
    x = layers.Dense(32,
                     activation='relu',
                     activity_regularizer=l2(0.001),
                     name='dense_3_relu')(x)
    o = layers.Dense(N_CLASSES, activation='softmax', name='softmax')(x)
    model = Model(inputs=i.input, outputs=o, name='long_short_term_memory')
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Exemple #24
0
def make_generator_model():
    """Generator: 100-dim noise -> (4, 100, 1) feature map (LeakyReLU output)."""
    model = tf.keras.Sequential()
    model.add(layers.Dense(4 * 25 * 128, use_bias=False, input_shape=(100, )))

    model.add(layers.Reshape((4, 25, 128)))
    assert model.output_shape == (None, 4, 25, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Widen only the second axis: 25 -> 50.
    model.add(
        layers.Conv2DTranspose(64, (4, 5),
                               strides=(1, 2),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 4, 50, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    #
    # Second widening: 50 -> 100, down to a single channel.
    model.add(
        layers.Conv2DTranspose(1, (4, 5),
                               strides=(1, 2),
                               padding='same',
                               use_bias=False))
    model.add(layers.LeakyReLU())
    assert model.output_shape == (None, 4, 100, 1)
    # The blocks below are disabled experiments kept for reference.
    # model.add(layers.BatchNormalization())
    # # # #
    # model.add(layers.Conv2DTranspose(16, (2, 1), strides=(2, 1), padding='valid', use_bias=False))
    # assert model.output_shape == (None, 2, 100, 16)
    # model.add(layers.BatchNormalization())
    # model.add(layers.LeakyReLU())
    #
    # model.add(layers.Conv2DTranspose(1, (2, 1), strides=(2, 1), padding='valid', use_bias=False, activation='tanh'))
    # assert model.output_shape == (None, 4, 100, 1)
    # # #
    return model
def _se_block(inputs, filters, prefix, se_ratio=1 / 4.):
    """Squeeze-and-excite gate: channel-wise re-weighting of `inputs`."""
    # Squeeze: [batch, height, width, channel] -> [batch, channel].
    squeezed = layers.GlobalAveragePooling2D(
        name=prefix + 'squeeze_excite/AvgPool')(inputs)

    # [batch, channel] -> [batch, 1, 1, channel] so the gate broadcasts over
    # the spatial axes (Reshape target excludes the batch dimension).
    gate = layers.Reshape((1, 1, filters))(squeezed)

    # fc1: bottleneck 1x1 conv + ReLU.
    gate = layers.Conv2D(filters=_make_divisible(filters * se_ratio),
                         kernel_size=1,
                         padding='same',
                         name=prefix + 'squeeze_excite/Conv')(gate)
    gate = layers.ReLU(name=prefix + 'squeeze_excite/Relu')(gate)

    # fc2: expand back to `filters` channels + hard sigmoid.
    gate = layers.Conv2D(filters=filters,
                         kernel_size=1,
                         padding='same',
                         name=prefix + 'squeeze_excite/Conv_1')(gate)
    gate = HardSigmoid(name=prefix + 'squeeze_excite/HardSigmoid')(gate)

    # Re-scale the input feature map channel-wise.
    return layers.Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, gate])
Exemple #26
0
 def __init__(self, latent_dim):
     """Convolutional VAE for (28, 28, 1) inputs.

     Args:
         latent_dim: latent code size; the inference net emits
             latent_dim + latent_dim units (presumably mean and
             log-variance halves — confirm against the sampling code).
     """
     super(CVAE, self).__init__()
     self.latent_dim = latent_dim
     # Encoder: two stride-2 convs, then a linear dense head (no activation).
     self.inference_net = tf.keras.Sequential([
         layers.InputLayer(input_shape=(28, 28, 1)),
         layers.Conv2D(filters=32,
                       kernel_size=3,
                       strides=(2, 2),
                       activation='relu'),
         layers.Conv2D(filters=64,
                       kernel_size=3,
                       strides=(2, 2),
                       activation='relu'),
         layers.Flatten(),
         # No activation
         layers.Dense(latent_dim + latent_dim)
     ])
     # Decoder: dense projection to 7x7x32, two stride-2 transposed convs
     # back to 28x28, then a final linear (logit) output layer.
     self.generative_net = tf.keras.Sequential([
         layers.InputLayer(input_shape=(latent_dim, )),
         layers.Dense(units=7 * 7 * 32, activation='relu'),
         layers.Reshape(target_shape=(7, 7, 32)),
         layers.Conv2DTranspose(filters=64,
                                kernel_size=3,
                                strides=(2, 2),
                                padding='SAME',
                                activation='relu'),
         layers.Conv2DTranspose(filters=32,
                                kernel_size=3,
                                strides=(2, 2),
                                padding='SAME',
                                activation='relu'),
         layers.Conv2DTranspose(filters=1,
                                kernel_size=3,
                                strides=(1, 1),
                                padding='SAME')
     ])
def build_PAES(pos_vocab_size, maxnum, maxlen, readability_feature_count, linguistic_feature_count,
               configs):
    """Assemble and compile the PAES essay-scoring model.

    The POS-tag branch embeds a flattened (maxnum x maxlen) tag sequence,
    applies a per-sentence Conv1D + attention, then a sentence-level LSTM +
    attention. Its output is concatenated with hand-crafted linguistic and
    readability features and mapped to a single sigmoid score.
    """
    emb_dim = configs.EMBEDDING_DIM
    drop_rate = configs.DROPOUT
    n_filters = configs.CNN_FILTERS
    kernel = configs.CNN_KERNEL_SIZE
    n_lstm = configs.LSTM_UNITS
    seq_len = maxnum * maxlen  # essay flattened to one tag sequence

    pos_word_input = layers.Input(shape=(seq_len,), dtype='int32', name='pos_word_input')
    pos_x = layers.Embedding(output_dim=emb_dim, input_dim=pos_vocab_size,
                             input_length=seq_len, weights=None, mask_zero=True,
                             name='pos_x')(pos_word_input)
    # Zero out embeddings at masked (padding) positions before reshaping.
    pos_x_maskedout = ZeroMaskedEntries(name='pos_x_maskedout')(pos_x)
    pos_drop_x = layers.Dropout(drop_rate, name='pos_drop_x')(pos_x_maskedout)
    # Recover the (sentences, words, embedding) structure.
    pos_resh_W = layers.Reshape((maxnum, maxlen, emb_dim), name='pos_resh_W')(pos_drop_x)
    pos_zcnn = layers.TimeDistributed(
        layers.Conv1D(n_filters, kernel, padding='valid'), name='pos_zcnn')(pos_resh_W)
    # Attention pools each sentence into one vector; the LSTM then models
    # the sentence sequence, pooled again by a second attention layer.
    pos_avg_zcnn = layers.TimeDistributed(Attention(), name='pos_avg_zcnn')(pos_zcnn)
    pos_hz_lstm = layers.LSTM(n_lstm, return_sequences=True, name='pos_hz_lstm')(pos_avg_zcnn)
    pos_avg_hz_lstm = Attention(name='pos_avg_hz_lstm')(pos_hz_lstm)

    # Hand-crafted feature inputs.
    linguistic_input = layers.Input((linguistic_feature_count,), name='linguistic_input')
    readability_input = layers.Input((readability_feature_count,), name='readability_input')

    merged = layers.Concatenate()([pos_avg_hz_lstm, linguistic_input, readability_input])
    y = layers.Dense(units=1, activation='sigmoid', name='y_att')(merged)

    model = keras.Model(inputs=[pos_word_input, linguistic_input, readability_input], outputs=y)
    model.summary()
    model.compile(loss='mse', optimizer='rmsprop')
    return model
    def __make_generator_model(self):
        """Build the DCGAN generator: latent vector -> 28x28x1 image in [-1, 1]."""
        gen = keras.Sequential()
        # Project the latent vector into 256 feature maps of size 7x7
        # (7x7 upsamples cleanly to 28x28). Bias is omitted since the
        # following BatchNormalization would cancel it anyway.
        gen.add(
            layers.Dense(256 * 7 * 7,
                         input_shape=(self.__latent_dim, ),
                         use_bias=False))
        gen.add(layers.BatchNormalization())
        gen.add(layers.Reshape((7, 7, 256)))
        # Transposed convolution: 128 output maps, 4x4 kernel, stride 1
        # keeps the 7x7 size; 'same' padding avoids losing border pixels.
        gen.add(
            layers.Conv2DTranspose(128, (4, 4), strides=(1, 1),
                                   padding='same'))
        gen.add(layers.BatchNormalization())
        # LeakyReLU is the customary activation in GAN generators.
        gen.add(layers.LeakyReLU(alpha=0.2))
        # Stride 2 doubles the resolution: 7x7 -> 14x14 (64 maps).
        gen.add(
            layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same'))
        gen.add(layers.BatchNormalization())
        gen.add(layers.LeakyReLU(alpha=0.2))
        # Final upsample to a single 28x28 channel; tanh matches inputs
        # normalized to [-1, 1] rather than 0-255.
        gen.add(
            layers.Conv2DTranspose(1, (4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   activation='tanh'))
        assert gen.output_shape == (None, 28, 28, 1)

        return gen
def make_generator(input_shape):  # define generator
    """Build a DCGAN generator mapping a latent vector to a 32x32x1 tanh image."""
    gen = tf.keras.Sequential()
    # Project the latent input into 256 feature maps of size 8x8;
    # bias is redundant before BatchNormalization.
    gen.add(layers.Dense(8 * 8 * 256, use_bias=False, input_shape=input_shape))
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())
    gen.add(layers.Reshape((8, 8, 256)))
    # Three transposed convolutions: 8x8 -> 8x8 -> 16x16 -> 32x32.
    gen.add(
        layers.Conv2DTranspose(128, (5, 5),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False))
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())
    gen.add(
        layers.Conv2DTranspose(64, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False))
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())
    # Single output channel; tanh matches data scaled to [-1, 1].
    gen.add(
        layers.Conv2DTranspose(1, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False,
                               activation='tanh'))
    return gen
def create_model(sequence_length, n_char, n_subdomain, n_domain,
                 n_domain_suffix):
    """Build and compile a sigmoid binary classifier over a character
    sequence plus three categorical entity inputs (subdomain, domain,
    domain suffix)."""
    # Character-sequence branch: embed characters, then convolve.
    sequence_input_layer = layers.Input(shape=(sequence_length, ))
    inputs = [sequence_input_layer]
    char_embedding = layers.Embedding(
        n_char + 1, 64, input_length=sequence_length)(sequence_input_layer)
    conv_layer = convolution_block(char_embedding)
    print('conv_layer------------>', conv_layer)

    # Entity branch: one small (size 4) embedding per categorical feature,
    # flattened to a plain vector via Reshape.
    entity_embedding = []
    for cardinality in (n_subdomain, n_domain, n_domain_suffix):
        emb_size = 4
        input_l, embedding_l = embedding_block(cardinality, emb_size)
        inputs.append(input_l)
        entity_embedding.append(
            layers.Reshape(target_shape=(emb_size, ))(embedding_l))

    # Merge every branch, regularize with dropout, and classify.
    merged = layers.concatenate([conv_layer, *entity_embedding])
    merged = layers.Dropout(rate=0.5)(merged)
    merged = layers.Dense(128, activation='elu')(merged)
    merged = layers.Dropout(rate=0.2)(merged)
    output_layer = layers.Dense(1, activation='sigmoid')(merged)

    model = models.Model(inputs=inputs, outputs=output_layer)
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=[metrics.mae, metrics.mean_absolute_percentage_error])
    return model