Example No. 1
# Imports assumed for this example (tf.keras):
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Activation, Concatenate, Dense, Dropout,
                                     Flatten, Input, LeakyReLU, Reshape)
from tensorflow.keras.models import Model


def build(input_size, latent_dim):
    layer_units = [512, 256]
    input_shape = (input_size, 1)
    inputs = Input(shape=input_shape)
    # input_shape is ignored on layers used in the functional API, so the
    # hard-coded (None, 978, 1) argument was dropped.
    xd = Dropout(0.1)(inputs)
    x = xd
    for f in layer_units:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    shape = K.int_shape(x)
    x = Flatten()(x)
    latent = Dense(latent_dim, use_bias=False)(x)
    encoder = Model(inputs, latent, name="encoder")

    # Decoder: mirror the encoder, then concatenate the dropout-perturbed
    # input back in before the final projection.
    latent_inputs = Input(shape=(latent_dim,))
    xd_input = Input(shape=input_shape)
    x = Dense(shape[1] * shape[2])(latent_inputs)
    x = Reshape((shape[1], shape[2]))(x)
    for f in layer_units[::-1]:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Dropout(0.5)(x)
    z = Concatenate(axis=-1)([x, xd_input])
    x = Dense(1)(z)
    outputs = Activation("tanh")(x)
    decoder = Model([xd_input, latent_inputs], outputs, name="decoder")
    autoencoder = Model(inputs, decoder([xd, encoder(inputs)]), name="autoencoder")
    return autoencoder
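A minimal smoke test for the builder above; the sizes are illustrative, not from the original:

autoencoder = build(input_size=978, latent_dim=64)
autoencoder.compile(optimizer="adam", loss="mse")
autoencoder.summary()

Example No. 2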
def SHCNN(
        input_shape=(192, 192, 3), num_classes=8, initial_lr=0.01, alpha=0.02):
    inputs = Input(shape=input_shape)
    X = Conv2D(44, (5, 5), padding='same',
               kernel_initializer='he_normal')(inputs)
    X = MaxPooling2D((2, 2), padding='same')(X)
    X = LeakyReLU(alpha=alpha)(X)
    X = Conv2D(44, (3, 3), padding='same', kernel_initializer='he_normal')(X)
    X = MaxPooling2D((2, 2), padding='same')(X)
    X = LeakyReLU(alpha=alpha)(X)
    X = Conv2D(88, (5, 5), padding='same', kernel_initializer='he_normal')(X)
    X = MaxPooling2D((2, 2), padding='same')(X)
    X = LeakyReLU(alpha=alpha)(X)
    X = Flatten()(X)
    X = Dropout(rate=0.40)(X)
    X = Dense(2048)(X)
    X = LeakyReLU(alpha=alpha)(X)
    X = Dropout(rate=0.40)(X)
    X = Dense(1024)(X)
    X = LeakyReLU(alpha=alpha)(X)
    X = Dense(num_classes)(X)
    output = Activation('softmax')(X)
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer=Adam(learning_rate=initial_lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
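Instantiating the classifier above with its defaults (assumes the usual tf.keras layer and optimizer imports):

model = SHCNN(input_shape=(192, 192, 3), num_classes=8)
model.summary()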
Example No. 3
def residual_net(x, num_filters, num_blocks, name=None):
    # `padding` is an external helper passed to Lambda (see the sketch after
    # this example) that zero-pads before the 'valid' convolutions.
    x = Lambda(padding)(x)
    x = Conv2D(filters=num_filters,
               kernel_size=3,
               strides=2,
               padding='valid',
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    for i in range(num_blocks):
        y = Conv2D(filters=num_filters // 2,
                   kernel_size=1,
                   strides=1,
                   padding='valid',
                   use_bias=False)(x)
        y = BatchNormalization()(y)
        y = LeakyReLU(alpha=0.1)(y)

        y = Lambda(padding)(y)
        y = Conv2D(filters=num_filters,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   use_bias=False)(y)
        y = BatchNormalization()(y)
        y = LeakyReLU(alpha=0.1)(y)
        # Only the final residual add is named, so the block output can be
        # fetched by name later.
        if i == num_blocks - 1:
            x = Add(name=name)([x, y])
        else:
            x = Add()([x, y])
    return x
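The function above relies on a module-level padding helper (passed to Lambda) that is not shown. For the residual adds to match shapes, it must pad one pixel on every spatial side; a minimal sketch under that assumption (the original may use ZeroPadding2D instead):

import tensorflow as tf

def padding(x):
    # Pad one pixel on each spatial side so the following 3x3 'valid'
    # convolutions preserve the size at stride 1 and exactly halve it at stride 2.
    return tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])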
Example No. 4
    def build_model(self, n_inputs, n_outputs):
        inputs = Input(shape=(n_inputs, ),
                       name='state_' + str(self.worker_idx))
        x = Dense(units=128,
                  kernel_initializer='he_normal',
                  bias_initializer='zeros',
                  name="layer_0_" + str(self.worker_idx))(inputs)

        x = LeakyReLU(alpha=0.05)(x)

        x = Dense(units=128,
                  kernel_initializer='he_normal',
                  bias_initializer='zeros',
                  name="layer_1_" + str(self.worker_idx))(x)

        x = LeakyReLU(alpha=0.05)(x)

        x = Dense(units=128,
                  kernel_initializer='he_normal',
                  bias_initializer='zeros',
                  name="layer_2_" + str(self.worker_idx))(x)

        x = LeakyReLU(alpha=0.05)(x)

        x = Dense(units=n_outputs,
                  kernel_initializer='he_normal',
                  bias_initializer='zeros',
                  activation='linear',
                  name='layer_3_' + str(self.worker_idx))(x)

        model = Model(inputs, x)
        model.summary()
        return model
Example No. 5
def get_downsampled_signal(img_tensor, module_name):
    # Only the content branch is instance-normalized; the style branch skips it.
    use_in = module_name == 'content'
    x = Conv2D(filters=32,
               kernel_size=(5, 5),
               strides=2,
               padding='same',
               name='downsample_{0}_conv_1'.format(module_name))(img_tensor)
    if use_in:
        x = InstanceNormalization(name='downsample_{0}_in_1'.format(module_name))(x)
    x = LeakyReLU(name='downsample_{0}_relu_1'.format(module_name))(x)
    x = Conv2D(filters=64,
               kernel_size=(3, 3),
               strides=2,
               padding='same',
               name='downsample_{0}_conv_2'.format(module_name))(x)
    if use_in:
        x = InstanceNormalization(name='downsample_{0}_in_2'.format(module_name))(x)
    x = LeakyReLU(name='downsample_{0}_relu_2'.format(module_name))(x)
    x = Conv2D(filters=64,
               kernel_size=(3, 3),
               strides=1,
               padding='same',
               name='downsample_{0}_conv_3'.format(module_name))(x)
    if use_in:
        x = InstanceNormalization(name='downsample_{0}_in_3'.format(module_name))(x)
    x = LeakyReLU(name='downsample_{0}_relu_3'.format(module_name))(x)
    return x
Example No. 6
    def activation(self, new_activation):
        if new_activation == 'leaky_relu':
            LR = LeakyReLU(alpha=0.001)
            # Rename the layer object so code that inspects __name__ (e.g.
            # where an activation function is expected) treats it like relu.
            LR.__name__ = 'relu'
            self._activation = LR
        else:
            self._activation = new_activation
Example No. 7
def res_block(input_x, filters, sizes, layer_id):

    f1, f2 = filters
    s1, s2 = sizes

    # first conv + BN + LeakyReLU
    x = Conv2D(f1,
               s1,
               padding='same',
               use_bias=False,
               name='conv_' + str(layer_id))(input_x)
    x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(layer_id))(x)
    x = LeakyReLU(alpha=0.1, name='leaky_' + str(layer_id))(x)

    # second conv + BN + LeakyReLU
    x = Conv2D(f2,
               s2,
               padding='same',
               use_bias=False,
               name='conv_' + str(layer_id + 1))(x)
    x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(layer_id + 1))(x)
    x = LeakyReLU(alpha=0.1, name='leaky_' + str(layer_id + 1))(x)

    x = add([input_x, x])  # residual add: input_x must already have f2 channels

    return x
Example No. 8
def get_upsampled_signal(x):
    y = Conv2DTranspose(filters=64,
                        kernel_size=(3, 3),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_1')(x)
    y = LeakyReLU(name='decode_relu_1')(y)
    y = UpSampling2D(size=(2, 2), name='decode_upsample_1')(y)
    y = Conv2DTranspose(filters=64,
                        kernel_size=(3, 3),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_2')(y)  # fixed: applying this layer to x discarded the first decode block
    y = LeakyReLU(name='decode_relu_2')(y)
    y = UpSampling2D(size=(2, 2), name='decode_upsample_2')(y)
    y = Conv2DTranspose(filters=32,
                        kernel_size=(3, 3),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_3')(y)
    y = LeakyReLU(name='decode_relu_3')(y)
    y = UpSampling2D(size=(2, 2), name='decode_upsample_3')(y)
    y = Conv2DTranspose(filters=16,
                        kernel_size=(2, 2),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_4')(y)
    y = LeakyReLU(name='decode_relu_4')(y)
    return y
Example No. 9
def shortcut_convolution(high_res_img, low_res_target, nb_channels_out):
    if img_size(low_res_target) == 1:
        kernel_size = img_size(high_res_img)
        downsampled_input = kl.TimeDistributed(
            SpectralNormalization(
                kl.Conv2D(nb_channels_out,
                          kernel_size,
                          activation=LeakyReLU(0.2))),
            name='shortcut_conv_1')(high_res_img)
    else:
        # Solve for stride, padding and kernel size so the strided convolution
        # maps img_size(high_res_img) exactly onto img_size(low_res_target).
        strides = int(
            tf.math.ceil(
                (2 + img_size(high_res_img)) / (img_size(low_res_target) - 1)))
        margin = 2
        padding = int(
            tf.math.ceil((strides * (img_size(low_res_target) - 1) -
                          img_size(high_res_img)) / 2) + 1 + margin)
        kernel_size = int(strides * (1 - img_size(low_res_target)) +
                          img_size(high_res_img) + 2 * padding)
        downsampled_input = kl.TimeDistributed(
            kl.ZeroPadding2D(padding=padding))(high_res_img)
        downsampled_input = kl.TimeDistributed(
            SpectralNormalization(
                kl.Conv2D(nb_channels_out,
                          kernel_size,
                          strides=strides,
                          activation=LeakyReLU(0.2))),
            name='shortcut_conv')(downsampled_input)
    downsampled_input = kl.LayerNormalization()(downsampled_input)
    return downsampled_input
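img_size, kl (an alias for the Keras layers module) and SpectralNormalization come from the surrounding module and are not shown. A plausible sketch of the img_size helper for the 5-D (batch, time, height, width, channels) tensors this function handles; this is an assumption, not the original code:

def img_size(tensor):
    # Spatial side length of a square (batch, time, height, width, channels) tensor.
    return tensor.shape[2]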
Example No. 10
def get_model():
    img_tensor = Input(shape=(64, 64, 3))
    x = Conv2D(filters=32, kernel_size=(5, 5), strides=1,
               padding='same')(img_tensor)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # AvgPool2D instead of MaxPool2D: the blurred output produced by averaging
    # gives a better estimate of the filters used than a sharp max-pooled one,
    # because averaging aggregates neighboring colors while max outputs are
    # dominated by edge-driven maxima.
    x = AvgPool2D(pool_size=(2, 2), strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(units=32, activation='relu')(x)
    x = Dropout(0.25)(x)
    predicted_class = Dense(units=num_classes, activation='softmax')(x)  # num_classes is defined at module scope

    model = Model(inputs=[img_tensor], outputs=[predicted_class])

    return model
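The pooling comment above can be checked in isolation; a tiny sketch using only tf.keras (values are illustrative):

import numpy as np
import tensorflow as tf

# A 4x4 patch with one bright "edge" pixel among mid-toned neighbors.
patch = np.full((1, 4, 4, 1), 0.2, dtype=np.float32)
patch[0, 1, 1, 0] = 1.0

avg = tf.keras.layers.AvgPool2D(pool_size=2, strides=1)(patch)
mx = tf.keras.layers.MaxPool2D(pool_size=2, strides=1)(patch)
print(avg.numpy().squeeze())  # 0.4 near the bright pixel: neighbors are aggregated
print(mx.numpy().squeeze())   # 1.0 wherever the window touches the bright pixel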
Example No. 11
def build(input_size, channels, latent_dim):
    layer_units = [512, 256]
    input_shape = (input_size, channels)
    drop_rate = 0.8
    inputs = Input(shape=input_shape)
    x = inputs
    x = Dropout(0.4)(x)  # input_shape is ignored here; the hard-coded (None, 978, 1) was dropped
    for f in layer_units:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Dropout(drop_rate)(x)
    shape = K.int_shape(x)
    x = Flatten()(x)
    latent = Dense(latent_dim, kernel_regularizer=regularizers.l2(1e-5),
                   activity_regularizer=regularizers.l1(1e-5))(x)
    encoder = Model(inputs, latent, name="encoder")
    latent_inputs = Input(shape=(latent_dim,))
    x = Dense(shape[1] * shape[2])(latent_inputs)
    x = Reshape((shape[1], shape[2]))(x)
    for f in layer_units[::-1]:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Dropout(drop_rate)(x)
    x = Dense(1)(x)
    outputs = Activation("tanh")(x)
    decoder = Model(latent_inputs, outputs, name="decoder")
    autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
    return autoencoder
Example No. 12
def decoder():
    latent_inputs = keras.Input(shape=(128, ))
    x = Dropout(0.25)(Dense(8 * 8 * 512)(latent_inputs))
    x = Reshape((8, 8, 512))(x)
    x = Conv2D(512, 1, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2DTranspose(512, 3, strides=2, padding="same")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2DTranspose(256, 3, strides=2, padding="same")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2DTranspose(128, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)

    decoder_outputs = Conv2D(128,
                             1,
                             strides=1,
                             padding='same',
                             activation='sigmoid')(x)

    decoder = Model(latent_inputs, decoder_outputs, name="decoder")
    return decoder
Example No. 13
    def build(self):
        inputs = Input(shape=[None, None, 3])
        conv1 = Conv2D(64, 4, 2, activation=LeakyReLU(0.2), padding='same')(inputs)
        conv2 = Conv2D(128, 4, 2, padding='same')(conv1)
        conv2 = self.norm(conv2)
        conv2 = LeakyReLU(0.2)(conv2)
        conv3 = Conv2D(256, 4, 2, padding='same')(conv2)
        conv3 = self.norm(conv3)
        conv3 = LeakyReLU(0.2)(conv3)
        conv4 = Conv2D(512, 4, 2, padding='same')(conv3)
        conv4 = self.norm(conv4)
        conv4 = LeakyReLU(0.2)(conv4)
        conv5 = Conv2D(1, 4, 2, padding='same')(conv4)

        upconv1 = Conv2DTranspose(256, 4, 2, padding='same')(conv5)
        upconv1 = self.norm(upconv1)
        upconv1 = Activation('relu')(upconv1)
        concat1 = Concatenate()([conv4, upconv1])
        upconv2 = Conv2DTranspose(128, 4, 2, padding='same')(concat1)
        upconv2 = self.norm(upconv2)
        upconv2 = Activation('relu')(upconv2)
        concat2 = Concatenate()([conv3, upconv2])
        upconv3 = Conv2DTranspose(64, 4, 2, padding='same')(concat2)
        upconv3 = self.norm(upconv3)
        upconv3 = Activation('relu')(upconv3)
        concat3 = Concatenate()([conv2, upconv3])
        upconv4 = Conv2DTranspose(32, 4, 2, padding='same')(concat3)
        upconv4 = self.norm(upconv4)
        upconv4 = Activation('relu')(upconv4)
        concat4 = Concatenate()([conv1, upconv4])
        outputs = Conv2DTranspose(self.classes, 4, 2, padding='same', activation='softmax')(concat4)

        model = Model(inputs=inputs, outputs=[conv5, outputs])

        return model
Example No. 14
def discriminator_model(img_shape):
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=3,
               strides=2,
               input_shape=img_shape,
               padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    model.summary()

    img = Input(shape=img_shape)
    validity = model(img)
    return Model(img, validity)
Example No. 15
def make_cnn_model(vocab_size=10000, embed_dim=8, input_seq_length=20):
    """
    I am the builder function for the CNN Model.
    :param vocab_size: size of the vocabulary of the embedding, should be size of vocab of the vectorizer
    :param embed_dim: how many dimensions to use for the vector embedding
    :param input_seq_length: how long the sequence of inputs will be
    :return: Keras Model
    """
    x = inp = Input(shape=(None, ), dtype="int64")
    x = Embedding(
        input_dim=vocab_size,
        output_dim=embed_dim,
        input_length=input_seq_length,
    )(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(units=128, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    out = Dense(1, activation="sigmoid")(x)
    return Model(inputs=[inp], outputs=[out], name="cnn_model")
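A minimal smoke test for the builder above, on random integer sequences (hyperparameters here are illustrative):

import numpy as np

model = make_cnn_model(vocab_size=10000, embed_dim=8, input_seq_length=20)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
x_dummy = np.random.randint(0, 10000, size=(32, 20))
y_dummy = np.random.randint(0, 2, size=(32, 1))
model.fit(x_dummy, y_dummy, epochs=1, verbose=0)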
Example No. 16
def Encoder(inputs, opts, istrain=True, name='e1'):
    assert opts.isize % 16 == 0, "isize has to be a multiple of 16"
    # initial layer
    x = Conv2D(opts.gen_filter, (4, 4),
               strides=2,
               padding='same',
               use_bias=False)(inputs)
    x = LeakyReLU(0.2)(x)
    size_now = opts.isize // 2
    # extra layers
    for t in range(opts.n_extra_layers):
        x = Conv2D(opts.gen_filter, (3, 3), padding='same', use_bias=False)(x)
        x = batch_norm(x, name + "_bn1_" + str(t), is_train=istrain)
        x = LeakyReLU(0.2)(x)

    channel = opts.gen_filter  # channel: default number is 64
    # reduction layers
    while size_now > 4:
        x = Conv2D(channel * 2, (4, 4),
                   strides=2,
                   padding='same',
                   use_bias=False)(x)
        x = batch_norm(x, name + "_bn2_" + str(channel), is_train=istrain)
        x = LeakyReLU(0.2)(x)
        channel = channel * 2
        size_now = size_now // 2

    # state size. channel x 4 x 4
    # final layer: reduce the 4 x 4 map to z_size x 1 x 1
    output = Conv2D(opts.z_size, (4, 4), padding='valid', use_bias=False)(x)

    return output
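batch_norm is an external wrapper that is not shown in the example. A minimal sketch of what it plausibly wraps, assumed rather than taken from the original codebase:

from tensorflow.keras.layers import BatchNormalization

def batch_norm(x, name, is_train=True):
    # Named batch normalization; `training` pins train/inference behavior.
    return BatchNormalization(name=name)(x, training=is_train)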
Example No. 17
    def __init__(self, raanan_architecture=False, sigmoid_activation=True):
        super(Decoder, self).__init__()

        self.input_layer = InputLayer()
        self.fully_connected3 = Dense(512)
        self.fully_connected4 = Dense(7 * 7 * 64)
        self.reshape = Reshape((7, 7, 64))
        self.conv_transpose1 = Conv2DTranspose(32,
                                               3,
                                               padding="same",
                                               strides=2)
        self.conv_transpose2 = Conv2DTranspose(1, 3, padding="same", strides=2)

        self.relu1 = ReLU()
        self.relu2 = ReLU()
        self.relu3 = ReLU()

        self.last_activation = sigmoid if sigmoid_activation else tanh
        if raanan_architecture:
            self.relu1 = LeakyReLU()
            self.relu2 = LeakyReLU()
            self.relu3 = LeakyReLU()

        print("Decoder network created with raanan architecture={}".format(
            raanan_architecture))
Example No. 18
    def get_model(self, input_shape, n_classes=1, use_imagenet=False):
        decoder = Sequential()
        number_layers = int(math.log2(input_shape[0])) - 1
        for i in range(number_layers):
            if i == 0:
                self.build_block(decoder,
                                 self.filters * (2**(number_layers - i - 2)),
                                 Conv2DTranspose,
                                 input_shape=self.code_shape)
                #decoder.add(Activation('relu'))
                decoder.add(LeakyReLU(alpha=self.leaky_alpha))
            elif i == number_layers - 1:
                self.build_block(decoder, 3, Conv2DTranspose, padding='same')
                decoder.add(Activation(activation='tanh'))
            else:
                self.build_block(decoder,
                                 self.filters * (2**(number_layers - i - 2)),
                                 Conv2DTranspose,
                                 padding='same')
                #decoder.add(Activation('relu'))
                decoder.add(LeakyReLU(alpha=self.leaky_alpha))

        self.model = decoder

        return self.model
Example No. 19
    def get_model(self, input_shape, n_classes=1, use_imagenet=False):
        encoder = Sequential()
        number_layers = int(math.log2(input_shape[0])) - 1
        for i in range(number_layers):
            if i == 0:
                self.build_block(encoder,
                                 self.filters * (2**i),
                                 Conv2D,
                                 padding='same',
                                 input_shape=input_shape)
                encoder.add(LeakyReLU(alpha=self.leaky_alpha))
            elif i == (number_layers - 1):
                self.build_block(encoder, self.code_shape, Conv2D)
                encoder.add(LeakyReLU(alpha=self.leaky_alpha))
            else:
                self.build_block(encoder,
                                 self.filters * (2**i),
                                 Conv2D,
                                 padding='same')
                encoder.add(LeakyReLU(alpha=self.leaky_alpha))

        if self.include_top:
            encoder.add(Flatten())
            encoder.add(
                Dense(n_classes, activation='sigmoid', name='predictions'))

        self.model = encoder

        return self.model
Example No. 20
def discriminator(num_filters=64, num_downsamplings=3):
    num_filters_ = num_filters
    x_in = Input(shape=(None, None, 3))

    x = Conv2D(num_filters, kernel_size=4, padding='same')(x_in)
    x = LeakyReLU(alpha=0.2)(x)

    for _ in range(num_downsamplings - 1):
        num_filters = min(num_filters * 2, num_filters_ * 8)
        x = Conv2D(num_filters,
                   kernel_size=4,
                   strides=2,
                   padding='same',
                   use_bias=False)(x)
        x = InstanceNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

    num_filters = min(num_filters * 2, num_filters_ * 8)
    x = Conv2D(num_filters,
               kernel_size=4,
               strides=1,
               padding='same',
               use_bias=False)(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(1, kernel_size=4, strides=1, padding='same')(x)

    return Model(x_in, x)
Example No. 21
    def build_generator(self, config_gener):

        m = float(config_gener['momentum'])
        a = float(config_gener['leaky_relu_alpha'])

        model = Sequential()

        model.add(
            Dense(int(config_gener['dense1']), input_dim=self.embeddings_dim))
        model.add(LeakyReLU(alpha=a))
        model.add(BatchNormalization(momentum=m))
        model.add(Dense(int(config_gener['dense2'])))
        model.add(LeakyReLU(alpha=a))
        model.add(BatchNormalization(momentum=m))
        model.add(Dense(int(config_gener['dense3'])))
        model.add(LeakyReLU(alpha=a))
        model.add(BatchNormalization(momentum=m))
        model.add(
            Dense(np.prod(self.img_shape),
                  activation=config_gener['activation']))
        model.add(Reshape(self.img_shape))

        noise = Input(shape=(self.embeddings_dim, ))
        label = Input(shape=(1, ), dtype='int32')
        label_embedding = Flatten()(Embedding(
            self.embeddings_vocab,
            self.embeddings_dim,
            weights=[self.word_embeddings.vectors],
            trainable=False)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Example No. 22
def sr_resnet(num_filters=64, num_res_blocks=16):
    # RRDB, upsample, upscaling_factor and channels are defined elsewhere in
    # the original codebase; an upsample sketch follows this example.
    lr_input = Input(shape=(24, 24, 3))

    x_start = Conv2D(64, kernel_size=3, strides=1, padding='same')(lr_input)
    x_start = LeakyReLU(0.2)(x_start)

    x = RRDB(x_start)

    x = Conv2D(64, kernel_size=3, strides=1, padding='same')(x)
    x = Lambda(lambda x: x * 0.2)(x)
    x = Add()([x, x_start])

    x = upsample(x, 1)
    if upscaling_factor > 2:
        x = upsample(x, 2)
    if upscaling_factor > 4:
        x = upsample(x, 3)

    x = Conv2D(64, kernel_size=3, strides=1, padding='same')(x)
    x = LeakyReLU(0.2)(x)
    hr_output = Conv2D(channels,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='tanh')(x)

    model = Model(inputs=lr_input, outputs=hr_output)

    return model
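RRDB, upsample, upscaling_factor and channels are module-level pieces not shown here. For orientation, a common ESRGAN-style upsample helper looks roughly like this; it is an assumption, and the original may use sub-pixel convolution instead:

from tensorflow.keras.layers import Conv2D, LeakyReLU, UpSampling2D

def upsample(x, number):
    # 2x nearest-neighbor upsampling followed by a convolution; `number` only
    # disambiguates layer names.
    x = UpSampling2D(size=2, name='upsample_{}'.format(number))(x)
    x = Conv2D(64, kernel_size=3, strides=1, padding='same',
               name='upsample_conv_{}'.format(number))(x)
    x = LeakyReLU(0.2)(x)
    return x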
Example No. 23
    def __init__(self, act='', lrelu_alpha=0.1, **kwargs):
        super(_Act, self).__init__(**kwargs)

        if act == 'prelu':
            self.func = PReLU()
        else:
            self.func = LeakyReLU(alpha=lrelu_alpha)
Example No. 24
    def discriminator(self):
        if self.D:
            return self.D
        self.D = Sequential()
        depth = 64
        dropout = 0.2

        input_shape = (self.img_rows, self.img_cols, self.channel)
        self.D.add(
            Conv2D(depth * 1,
                   5,
                   strides=2,
                   input_shape=input_shape,
                   padding='same'))
        self.D.add(LeakyReLU(alpha=0.2))
        self.D.add(Dropout(dropout))

        self.D.add(Conv2D(depth * 2, 5, strides=2, padding='same'))
        self.D.add(LeakyReLU(alpha=0.2))
        self.D.add(Dropout(dropout))

        self.D.add(Conv2D(depth * 4, 5, strides=2, padding='same'))
        self.D.add(LeakyReLU(alpha=0.2))
        self.D.add(Dropout(dropout))

        self.D.add(Conv2D(depth * 8, 5, strides=1, padding='same'))
        self.D.add(LeakyReLU(alpha=0.2))
        self.D.add(Dropout(dropout))

        # Out: 1-dim probability
        self.D.add(Flatten())
        self.D.add(Dense(1))
        self.D.add(Activation('sigmoid'))
        self.D.summary()
        return self.D
Example No. 25
    def __generate_detection_resnet34(self, input_layer, n_category=None):
        out_filters = n_category + 4

        x_1, x_2, x_3, x = self.__generate_encoder(input_layer)

        # Deconvolution block 1: (16, 16, 512) -> (32, 32, 512)
        x = Conv2D(filters=256, kernel_size=3, strides=1, padding='same')(x)
        x = Conv2DTranspose(filters=256, kernel_size=4, strides=2, padding='same')(x)
        x = Concatenate()([x_3, x])
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Deconvolution block 2: (32, 32, 512) -> (64, 64, 256)
        x = Conv2D(filters=128, kernel_size=3, strides=1, padding='same')(x)
        x = Conv2DTranspose(filters=128, kernel_size=4, strides=2, padding='same')(x)
        x = Concatenate()([x_2, x])
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Deconvolution block 3: (64, 64, 256) -> (128, 128, 128)
        x = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x)
        x = Conv2DTranspose(filters=64, kernel_size=4, strides=2, padding='same')(x)
        x = Concatenate()([x_1, x])
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Block 4:
        x = Conv2D(filters=out_filters, kernel_size=1, strides=1, padding='same')(x)
        out = Activation('sigmoid')(x)  # optional

        return Model(input_layer, out)
Example No. 26
    def createModel( self, inputs, outputs, hiddenLayers, activationType, learningRate ):
        model   = Sequential()
        if len(hiddenLayers) == 0:
            model.add( Dense(self.output_size,
                                input_shape=(self.input_size,),
                                kernel_initializer='lecun_uniform') )
            model.add( Activation('linear') )
        else:
            model.add( Dense(hiddenLayers[0],
                                input_shape=(self.input_size,),
                                kernel_initializer='lecun_uniform') ) 
            if activationType == 'LeakyReLU':
                model.add( LeakyReLU(alpha=0.01) )
            else:
                model.add( Activation(activationType) )

            for index in range(1,len(hiddenLayers)):
                layerSize   = hiddenLayers[index]
                model.add( Dense(layerSize,kernel_initializer='lecun_uniform') )
                if activationType == 'LeakyReLU':
                    model.add( LeakyReLU(alpha=0.01) )
                else:
                    model.add( Activation(activationType) )
            model.add( Dense(self.output_size,kernel_initializer='lecun_uniform') )
            model.add( Activation('linear') )
        optimizer   = optimizers.RMSprop( learning_rate = learningRate, rho = 0.9, epsilon = 1e-6 )
        model.compile( loss = "mse", optimizer = optimizer )
        model.summary()
        return model
Example No. 27
    def build(self, hr_shape):
        x_in = Input(shape=hr_shape)

        x = self.discriminator_block(x_in, self.n_filters)
        x = self.discriminator_block(x, self.n_filters * 2)
        x = self.discriminator_block(x, self.n_filters * 4)
        x = self.discriminator_block(x, self.n_filters * 8)
        x = self.discriminator_block(x, self.n_filters * 16)

        x = Conv2D(self.n_filters * 16,
                   kernel_size=3,
                   strides=1,
                   kernel_initializer=self.init_kernel,
                   use_bias=False,
                   padding='same')(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(self.n_filters * 16,
                   kernel_size=3,
                   strides=1,
                   kernel_initializer=self.init_kernel,
                   use_bias=False,
                   padding='same')(x)

        x = LeakyReLU(alpha=0.2)(x)
        x = Flatten()(x)

        x = Dense(128)(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = Dense(1)(x)
        model = Model(x_in, x)

        return model
Example No. 28
    def build(self):
        input_discriminator = Input(shape=self.input_shape)

        x = Conv2D(64,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   use_bias=False,
                   activation=None,
                   padding='same')(input_discriminator)
        x = LeakyReLU(alpha=0.2)(x)

        x = conv_block(x,
                       filters=64,
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       axis=self.axis)
        x = conv_block(x,
                       filters=128,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       axis=self.axis)
        x = conv_block(x,
                       filters=128,
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       axis=self.axis)
        x = conv_block(x,
                       filters=256,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       axis=self.axis)
        x = conv_block(x,
                       filters=256,
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       axis=self.axis)
        x = conv_block(x,
                       filters=512,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       axis=self.axis)
        x = conv_block(x,
                       filters=512,
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       axis=self.axis)
        x = Flatten(data_format=self.data_format)(x)
        x = Dense(1024, activation=None)(x)
        x = LeakyReLU(alpha=0.2)(x)
        output_discriminator = Dense(1, activation='sigmoid')(x)

        discriminator_model = Model(inputs=input_discriminator,
                                    outputs=output_discriminator,
                                    name="Discriminador")

        return discriminator_model
Example No. 29
    def createRegularizedModel( self,
                                inputs,
                                outputs,
                                hiddenLayers,       # List of nodes at each hidden layer
                                activationType,
                                learningRate ):
        bias    = True
        dropout = 0
        regularizationFactor    = 0.01
        model   = Sequential()
        if len(hiddenLayers) == 0:
            model.add( Dense(self.output_size,
                                input_shape=(self.input_size,),
                                kernel_initializer='lecun_uniform',
                                use_bias=bias) )
            model.add( Activation("linear") )
        else:
            if regularizationFactor > 0:
                model.add( Dense(hiddenLayers[0],
                                    input_shape=(self.input_size,),
                                    kernel_initializer='lecun_uniform',
                                    kernel_regularizer=l2(regularizationFactor),
                                    use_bias=bias) )
            else:
                model.add( Dense(hiddenLayers[0],
                                    input_shape=(self.input_size,),
                                    kernel_initializer='lecun_uniform',
                                    use_bias=bias) )
            if activationType == 'LeakyReLU':
                model.add( LeakyReLU(alpha=0.01) )
            else:
                model.add( Activation(activationType) )

            for index in range(1,len(hiddenLayers)):
                if regularizationFactor > 0.0:
                    model.add( Dense(hiddenLayers[index],
                                        kernel_initializer='lecun_uniform',
                                        kernel_regularizer=l2(regularizationFactor),
                                        use_bias=bias) )
                else:
                    model.add( Dense(hiddenLayers[index],
                                    kernel_initializer='lecun_uniform',
                                    use_bias=bias) )
                if activationType == "LeakyReLU":
                    model.add( LeakyReLU(alpha=0.01) )
                else:
                    model.add( Activation(activationType) )
                if dropout > 0:
                    model.add( Dropout(dropout) )
            model.add( Dense(self.output_size,
                                kernel_initializer='lecun_uniform',
                                use_bias=bias) )
            model.add( Activation("linear") )
        optimizer   = optimizers.RMSprop( learning_rate = learningRate, rho = 0.9, epsilon = 1e-6 )
        model.compile( loss = "mse", optimizer = optimizer )
        model.summary()
        return model
Example No. 30
def _deconv_block(x, filters, kernel_size=1):
    # Depthwise conv -> pointwise 1x1 conv (a separable convolution), each
    # followed by BN + LeakyReLU, then 2x upsampling.
    x = DepthwiseConv2D(kernel_size=kernel_size,
                        padding="same",
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters, kernel_size=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    return UpSampling2D()(x)
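A quick shape check for the block above (sizes are illustrative):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input(shape=(16, 16, 64))
out = _deconv_block(inp, filters=32, kernel_size=3)
print(Model(inp, out).output_shape)  # (None, 32, 32, 32): spatial size doubled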