Example #1
def dnn_2(input_dim, nb_class):
    inputs = Input(shape=input_dim)
    x = Dense(256, kernel_initializer='he_uniform')(inputs)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)
    x = Dense(128, kernel_initializer='he_uniform')(x)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)
    x = Dense(64, kernel_initializer='he_uniform')(x)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)
    x = Dense(32, kernel_initializer='he_uniform')(x)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)

    outputs = Dense(1,
                    activation='sigmoid',
                    kernel_initializer='glorot_uniform')(x)
    model = Model(inputs=inputs, outputs=outputs)

    return model
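This snippet omits its imports, and `nb_class` is unused by `dnn_2` itself (the head is a single sigmoid unit). A minimal sketch of how it could be wired up, assuming `InstanceNormalization` comes from `tensorflow_addons` and `swish` from `tf.keras.activations` (both assumptions, since the original does not show its imports):

# Hedged sketch: assumed imports and usage for dnn_2 above.
from tensorflow.keras.layers import Input, Dense, Activation, GaussianDropout
from tensorflow.keras.models import Model
from tensorflow.keras.activations import swish
from tensorflow_addons.layers import InstanceNormalization  # assumed source

# Build a binary classifier over 20 features.
model = dnn_2(input_dim=(20,), nb_class=2)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])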
Example #2
def apply_dropout(inp, rate, dropout_type='standard', name=None):
    '''Helper function to add a dropout layer of a specified type to a model

    Parameters
    ----------
    inp: tensor
        The input tensor
    rate: float
        The rate parameter of the dropout (proportion of units dropped)
    dropout_type: str
        The type of the dropout. Allowed values are ['standard', 'gaussian', 'alpha', 'none'], which respectively
        correspond to the Dropout, GaussianDropout, and AlphaDropout keras layers, or no dropout. The default is
        'standard'
    name: str
        This string is passed as the name parameter when constructing the layer

    Returns
    -------
    tensor
        The output tensor after application of the dropout layer
    '''

    if dropout_type == 'standard':
        output = Dropout(rate, name=name)(inp)
    elif dropout_type == 'gaussian':
        output = GaussianDropout(rate, name=name)(inp)
    elif dropout_type == 'alpha':
        output = AlphaDropout(rate, name=name)(inp)
    elif dropout_type == 'none':
        output = inp
    else:
        raise ValueError('Unrecognised dropout type {}'.format(dropout_type))
    return output
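Since the docstring spells out the allowed `dropout_type` values, a quick usage sketch (the tensor scaffolding and layer names here are illustrative, not from the original):

# Illustrative usage of apply_dropout; the Input/Dense scaffolding is assumed.
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

inp = Input(shape=(64,))
x = Dense(32, activation='relu')(inp)
x = apply_dropout(x, rate=0.25, dropout_type='gaussian', name='gdrop_1')
x = apply_dropout(x, rate=0.0, dropout_type='none')  # passes the tensor through
model = Model(inp, x)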
Example #3
 def _build_discriminator_latent(self,
                                 latent_dim,
                                 layers=16,
                                 width=16,
                                 hidden_activation='relu',
                                 init=RandomNormal(mean=0, stddev=0.02),
                                 add_noise=True):
     """Build a model that classifies latent vectors as real or fake."""
     input_layer = Input((latent_dim,))
     F = input_layer
     if add_noise:
         F = GaussianNoise(0.01)(F)
     for i in range(layers):
         X = Dense(width, kernel_initializer=init)(F)
         if add_noise:
             X = GaussianDropout(0.005)(X)
         X = LayerNormalization()(X)
         if hidden_activation == 'leaky_relu':
             X = LeakyReLU(0.02)(X)
         else:
             X = Activation(hidden_activation)(X)
         F = Concatenate()([F, X])
     X = Dense(128, kernel_initializer=init)(F)
     if hidden_activation == 'leaky_relu':
         X = LeakyReLU(0.02)(X)
     else:
         X = Activation(hidden_activation)(X)
     X = Dense(1, kernel_initializer=init)(X)
     output_layer = Activation('sigmoid')(X)
     model = Model(input_layer, output_layer)
     model.compile(Adam(clipnorm=1, lr=self.parameters["lr"]["gan_discriminator"],
                        beta_1=0.5),
                   loss=self.parameters["loss"]["adversarial"])
     return model
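The loop in this builder grows its feature tensor DenseNet-style: each iteration's activations are concatenated back onto F, so every Dense block sees the original input plus all previous blocks' outputs. A minimal standalone sketch of that skip-concatenation pattern (the widths are illustrative, not from the original):

# Minimal sketch of the dense skip-concatenation pattern used above.
from tensorflow.keras.layers import Input, Dense, Concatenate

F = Input(shape=(32,))                    # running feature tensor, width 32
for _ in range(3):
    X = Dense(16, activation='relu')(F)   # new features, width 16
    F = Concatenate()([F, X])             # width grows: 32 -> 48 -> 64 -> 80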
Example #4
    def build_base_encoder(self):
        """ phase [-1]: base-encoder q(z|x) & q(y|x) """
        inputs = Input(shape=(self.input_dim,))
        if self.dropout_rate is not None:
            z = GaussianDropout(self.dropout_rate)(inputs)
        else:
            z = inputs
        z = self.encoder_block(z)
        self.base_encoder = Model(inputs, z)
Example #5
    def _build_feature_extractor(self,
                                 image_shape,
                                 output_width,
                                 channels=None,
                                 kernel_widths=None,
                                 strides=None,
                                 hidden_activation='relu',
                                 output_activation='linear',
                                 init=RandomNormal(mean=0, stddev=0.02),
                                 add_noise=False):
        """Build a model that maps images to some feature space."""

        if not (len(channels) == len(kernel_widths)
                and len(kernel_widths) == len(strides)):
            raise ValueError("channels, kernel_widths, strides must have equal"
                             f" length; got {len(channels)},"
                             f"{len(kernel_widths)}, {len(strides)}")

        input_layer = Input(image_shape)
        X = input_layer

        if add_noise:
            X = GaussianNoise(0.01)(X)
        for channel, kernel, stride in zip(channels, kernel_widths, strides):
            X = Conv2D(channel,
                       kernel,
                       strides=stride,
                       padding='same',
                       kernel_initializer=init)(X)
            if add_noise:
                X = GaussianDropout(0.005)(X)
            X = LayerNormalization()(X)
            if hidden_activation == 'leaky_relu':
                X = LeakyReLU(0.02)(X)
            else:
                X = Activation(hidden_activation)(X)
        X = Flatten()(X)
        X = Dense(output_width, kernel_initializer=init)(X)
        output_layer = Activation(output_activation)(X)
        model = Model(input_layer, output_layer)
        return model
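Since `channels`, `kernel_widths`, and `strides` must be equal-length lists, here is a hedged call sketch (the list values and widths are invented for illustration, and it assumes the method is invoked from within its hosting class):

# Illustrative call; the parameter values are assumptions, not original code.
extractor = self._build_feature_extractor(
    image_shape=(64, 64, 3),
    output_width=128,
    channels=[32, 64, 128],     # one Conv2D block per entry
    kernel_widths=[4, 4, 4],
    strides=[2, 2, 2],          # each stride-2 block halves the spatial size
    hidden_activation='leaky_relu',
    add_noise=True)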
Example #6
    #     x_val, y_val,
    #     batch_size = batch_size
    # )

    mc = ModelCheckpoint('c:/data/modelcheckpoint/lotte_' + str(count) +
                         '.hdf5',
                         save_best_only=True,
                         verbose=1)

    model = Sequential()
    model.add(eff)
    # model.add(Conv2D(1024, kernel_size=3, padding='same', activation = 'swish'))
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.3))
    model.add(Dense(128, activation='swish'))
    model.add(GaussianDropout(0.4))
    model.add(Dense(1000, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])

    # hist = model.fit_generator(
    #     train_set,
    #     validation_data=val_set,
    #     epochs=1,
    #     steps_per_epoch=2400,
    #     callbacks=[es, rl, mc]
    # )

    model.fit(x_train,
Example #7
TF = EfficientNetB5(weights="imagenet",
                    include_top=False,
                    input_shape=image_size)
TF.trainable = True
x = TF.output
x = Conv2D(256,
           2,
           padding='SAME',
           activation='swish',
           activity_regularizer=regularizers.l2(1e-4),
           kernel_regularizer=regularizers.l2(1e-4))(x)
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)

x = Dense(3096, activation='swish')(x)
x = GaussianDropout(rate=0.2)(x)

x = Dense(2048, activation='swish')(x)
outputs = Dense(1000, activation='softmax')(x)
model = Model(inputs=TF.input, outputs=outputs)
model.summary()

#COMPILE
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.train import Checkpoint, latest_checkpoint
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['acc'])
mc = ModelCheckpoint('C:/data/MC/best_LT_vision2_LT.hdf5',
                     save_best_only=True,
                     mode='auto')
Example #8
    x = Conv2D(64, (3, 3), strides=1, padding='same', activation='relu', name='3rdConv' + path)(x)
    x = BatchNormalization(axis=-1, scale=True, trainable=True)(x)
    ox = tf.keras.layers.GlobalAveragePooling2D(name=path + 'GAP')(x)

    return ox

left_out = CoreNet(ix=left_input, path='Left')
right_out = CoreNet(ix=right_input, path='Right')

c = tf.keras.layers.Lambda(CosineDistance, name="CosineDistance")([left_out, right_out])
e = tf.keras.layers.Lambda(EuclideanDistance, name="EuclideanDistance")([left_out, right_out])

x = tf.keras.layers.concatenate([left_out, right_out, c, e], axis=-1)

x = GaussianDropout(rate=0.5)(x)
x = Dense(1024, activation='relu', name='1stFCL')(x)
x = BatchNormalization(axis=-1, scale=True, trainable=True)(x)

x = GaussianDropout(rate=0.5)(x)
x = Dense(512, activation='relu', name='2ndFCL')(x)
x = BatchNormalization(axis=-1, scale=True, trainable=True)(x)

x = GaussianDropout(rate=0.5)(x)
x = Dense(256, activation='relu', name='3rdFCL')(x)
x = BatchNormalization(axis=-1, scale=True, trainable=True)(x)

x = GaussianDropout(rate=0.5)(x)
output = Dense(1, activation='sigmoid', name='output')(x)

model = Model(inputs=[left_input, right_input], outputs=[output])
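The snippet calls CosineDistance and EuclideanDistance without defining them; below is a plausible sketch of such Lambda helpers (these bodies are assumptions, not the original author's code):

from tensorflow.keras import backend as K

# Assumed implementations of the distance helpers wrapped in the Lambda layers above.
def CosineDistance(tensors):
    a, b = tensors
    a = K.l2_normalize(a, axis=-1)
    b = K.l2_normalize(b, axis=-1)
    # 1 - cosine similarity; keepdims so the result concatenates with the features
    return 1.0 - K.sum(a * b, axis=-1, keepdims=True)

def EuclideanDistance(tensors):
    a, b = tensors
    squared = K.sum(K.square(a - b), axis=-1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))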
Example #9
def test_delete_channels_noise(channel_index, data_format):
    layer_test_helper_flatten_2d(GaussianNoise(0.5), channel_index, data_format)
    layer_test_helper_flatten_2d(GaussianDropout(0.5), channel_index, data_format)
    layer_test_helper_flatten_2d(AlphaDropout(0.5), channel_index, data_format)
Example #10
    def _init(self):
        inp = Input(shape=(self.config.inputs, ))
        if self.config.embedding:
            layer = Embedding(256,
                              self.config.classes,
                              input_length=self.config.inputs)(inp)
        else:
            layer = inp
        if self.config.input_dropout:
            layer = GaussianDropout(self.config.input_dropout)(layer)

        for neurons, depth in zip(self.config.neuron_list,
                                  self.config.block_depth):

            def dense(in_layer):
                """
                Creates a new dense layer using keras' dense function. The
                parameters
                used
                to create it are given in the parent function call.
                This function exists as the initializer and the regularizer
                are both
                classes
                which have to be freshly instantiated when creating a new layer.
                :param in_layer: layer the new dense layer will be attached
                to in graph
                :return: dense layer
                """
                return Dense(neurons,
                             kernel_initializer=initializer())(in_layer)

            prev_in = layer
            for _ in range(depth):
                key_layer = dense(layer)
                query_layer = dense(layer)
                value_layer = GELU()(key_layer)
                value_layer = dense(value_layer)
                value_layer = Softmax(axis=-1 -
                                      self.config.class_neurons)(value_layer)
                key_layer = Multiply()([key_layer, value_layer])
                layer = Add()([query_layer, key_layer])
                layer = BatchNormalization(axis=1)(layer)
                layer = GaussianDropout(self.config.dropout)(layer)
                layer = GELU()(layer)
            layer = Concatenate(axis=-1)([prev_in, layer])

        if self.config.class_neurons and self.config.embedding:
            layer = GlobalAveragePooling1D()(layer)
        layer = Dense(units=256,
                      activation=self.config.output_activation,
                      kernel_initializer=initializer())(layer)

        self.model = Model(inputs=[inp], outputs=[layer])
        self.model.compile(loss=self.config.loss,
                           optimizer=OPTIMIZER(lr=self.config.learning_rate,
                                               weight_decay_rate=1e-3),
                           metrics=self.config.metrics)
        self.model.summary()
        data = (np.ones((self.config.batch_size, self.config.inputs)),
                np.ones((self.config.batch_size, 1)))
        # Run one warm-up train/predict step to build the graph, then freeze
        # it for improved performance and reduced RAM usage
        self.model.train_on_batch(*data)
        self.model.predict_on_batch(data[0])
        tf.compat.v1.get_default_graph().finalize()
Example #11
def gru_model():
    emb_n = 64
    category_num = {
        'adidmd5': (780369, emb_n),
        'idfamd5': (360, emb_n),
        'imeimd5': (1021836, emb_n),
        'macmd5': (329184, emb_n),
        'openudidmd5': (85051, emb_n),
        'ip': (813719, emb_n),
        'reqrealip': (9748, emb_n),
        'adunitshowid': (800, emb_n),
        'apptype': (91, emb_n),
        'carrier': (4, emb_n),
        'city': (331, emb_n),
        'dvctype': (3, emb_n),
        'model': (5923, emb_n),  # 7957 7958  5922
        'make': (1704, emb_n),
        'mediashowid': (313, emb_n),
        'ntt': (7, emb_n),
        'orientation': (2, emb_n),
        'osv': (185, emb_n),
        'pkgname': (2368, emb_n),
        'ppi': (119, emb_n),
        'ver': (3268, emb_n),
        'screen_area': (1396, emb_n),
        'creative_dpi': (1763, emb_n),
        'hour': (24, emb_n),
        'lan': (33, emb_n),
        'h': (985, emb_n),
        'w': (449, emb_n),
    }
    # categorical inputs
    category_inp = Input(shape=(len(category),), name='category_inp')
    cat_embeds = []
    for idx, col in enumerate(category):
        x = Lambda(lambda x: x[:, idx, None])(category_inp)
        x = Embedding(category_num[col][0], category_num[col][1], input_length=1)(x)
        cat_embeds.append(x)
    embeds = concatenate(cat_embeds, axis=2)
    embeds = GaussianDropout(0.5)(embeds)
    # numerical inputs
    numerical_inp = Input(shape=(len(numerical),), name='continous_inp')
    print('numerical', len(numerical) // 8 * 8 + 8)
    x2 = Dense(len(numerical) // 8 + 8, activation='relu', kernel_initializer='random_uniform',
               bias_initializer='zeros')(
        numerical_inp)
    x2 = Dropout(0.5)(x2)
    x2 = BatchNormalization()(x2)
    x2 = Reshape([1, int(x2.shape[1])])(x2)
    x = concatenate([embeds, x2], axis=2)
    # backbone network
    x = CuDNNGRU(128)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.50)(x)
    x = Dense(64, activation='relu', kernel_initializer='random_uniform')(x)
    x = PReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.50)(x)
    x = Dense(32, activation='relu', kernel_initializer='random_uniform')(x)
    x = PReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.50)(x)
    out_p = Dense(1, activation='sigmoid')(x)
    return Model(inputs=[category_inp, numerical_inp], outputs=out_p)
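gru_model reads two module-level lists, `category` and `numerical`, that this excerpt never shows; they would look something like the sketch below (the `category` entries are inferred from the `category_num` keys, and the `numerical` column names are hypothetical):

# Assumed module-level globals used by gru_model; the real lists come from the
# feature-engineering code that this excerpt omits.
category = ['adidmd5', 'idfamd5', 'imeimd5', 'macmd5', 'openudidmd5',
            'ip', 'reqrealip', 'adunitshowid', 'apptype', 'carrier',
            'city', 'dvctype', 'model', 'make', 'mediashowid', 'ntt',
            'orientation', 'osv', 'pkgname', 'ppi', 'ver',
            'screen_area', 'creative_dpi', 'hour', 'lan', 'h', 'w']
numerical = [f'num_feat_{i}' for i in range(40)]  # hypothetical numeric columns

model = gru_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])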
Example #12
# valid_generator = idg2.flow(x_valid,y_valid, batch_size=32)
# # test_generator = idg2.flow(x_pred)

mc = ModelCheckpoint('../data/modelcheckpoint/lotte_13.h5',
                     save_best_only=True,
                     verbose=1)

from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, BatchNormalization, Dense, Activation, GaussianDropout
efficientnet = EfficientNetB4(include_top=False,
                              weights='imagenet',
                              input_shape=(176, 176, 3))
efficientnet.trainable = True
a = efficientnet.output
a = Dense(2048, activation='swish')(a)
a = GaussianDropout(0.3)(a)
a = GlobalAveragePooling2D()(a)
a = Dense(1000, activation='softmax')(a)

model = Model(inputs=efficientnet.input, outputs=a)

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
early_stopping = EarlyStopping(patience=15)
lr = ReduceLROnPlateau(patience=5, factor=0.5, verbose=1)
from tensorflow.keras.optimizers import Adam

# model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0005),
#                 metrics=['acc'])
# learning_history = model.fit_generator(train_generator,epochs=200, steps_per_epoch= len(x_train) / 32,
#     validation_data=valid_generator, callbacks=[early_stopping,lr,mc])
Example #13
#%% Model 1
# Overkill. A slightly modified version of the intro course's cifar10 network.
# Reaches ~99% validation accuracy. No cross-validation, so it may overfit.
dropout = 0.2
starting_filters = 8 
kernel = (3,3)
model = Sequential()

model.add(Conv2D(starting_filters, kernel_size=kernel, activation='relu', padding="same", kernel_initializer='he_uniform', input_shape = (28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(starting_filters, kernel_size=kernel, activation='relu', padding="same", kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(GaussianDropout(dropout * 0.55))

model.add(Conv2D(2*starting_filters, kernel_size=kernel, activation='relu', padding="same", kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Conv2D(2*starting_filters, kernel_size=kernel, activation='relu', padding="same", kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(GaussianDropout(dropout * 0.7))

model.add(Conv2D(4*starting_filters, kernel_size=kernel, activation='relu', padding="same", kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Conv2D(4*starting_filters, kernel_size=kernel, activation='relu', padding="same", kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
Example #14
 def pile_layers(self, shape_size, optimizer, loss, layers, last_layer_activator, model_path = '', last_layer_output = 1, embed_model_path = ''):
     
     if model_path != '':
         print(f'Loading model from: {model_path}')
         return load_model(model_path)
     
     model = Sequential()
     
     # First Layer
     first_layer = layers[0]
     
     # First Layer is Dense.
     if first_layer['type'] == 'dense':
         if layers[0]['activation'] == 'lrelu':
             model.add(Dense(int(shape_size*layers[0]['widthModifier']), input_dim=shape_size))
             model.add(LeakyReLU(alpha=0.1))
         else:
             model.add(Dense(int(shape_size*layers[0]['widthModifier']), 
                        input_dim=shape_size, 
                        activation=layers[0]['activation']))
    
     elif first_layer['type'] == 'noise':
         model.add(GaussianDropout(layers[0]['widthModifier']))
         
     # First Layers is Embeddings.
     elif first_layer['type'] == 'embedding':
         try:
             embed_model = load_model(embed_model_path)
         except OSError:
             print(f'No model found at {embed_model_path} path.')
             raise
         model.add(embed_model.layers[0])
         
         # Make sure the embeddings aren't updated.
         if not first_layer['update']:
             model.layers[0].trainable = False
         
         model.add(Flatten())   
         if layers[0]['dropout'] > 0:
             model.add(Dropout(rate = layers[0]['dropout']))
 
     # Other Layers
     for layer in layers[1:]:
         
         model.add(BatchNormalization())
         
         if layer['type'] == 'lnorm':
             l1 = layer['l1']
             l2 = layer['l2']
             model.add(Dense(int(shape_size*layer['widthModifier']), 
                             input_dim=shape_size, 
                             kernel_regularizer=regularizers.l1_l2(l1=l1, l2=l2),
                             activation=layer['activation']))
             
         elif layer['type'] == 'pooling':
             if layer['pool_type'] == 'avg':
                 model.add(AveragePooling1D(pool_size=layer['pool_size'], strides=None,
                                            padding='valid', data_format='channels_last'))
             elif layer['pool_type'] == 'max':
                 model.add(MaxPooling1D(pool_size=layer['pool_size'], strides=None,
                                        padding='valid', data_format='channels_last'))
 
         elif layer['type'] == 'dense' and layer['activation'] == 'lrelu':
             model.add(Dense(int(shape_size*layer['widthModifier']),
                             input_dim=shape_size,
                             kernel_initializer='normal'))
             model.add(LeakyReLU(alpha=0.1))
                             
         elif layer['type'] == 'dense':
             model.add(Dense(int(shape_size*layer['widthModifier']),
                             input_dim=shape_size,
                             activation=layer['activation']))
 
         
         # Should we add some dropout regularization?
         if layer['dropout'] > 0:
             model.add(Dropout(rate = layer['dropout']))
     
     
     if last_layer_activator:
         model.add(Dense(last_layer_output, activation=last_layer_activator))
     else:
         model.add(Dense(last_layer_output))
     
     metrics = ''
     if loss == 'sparse_categorical_crossentropy':
         metrics = ['sparse_categorical_crossentropy', 'sparse_categorical_accuracy']
     else:
         metrics=[loss, 'accuracy']
     
     # Compile the model
     model.compile(loss=loss, optimizer = optimizer, metrics = metrics)
     
     return model
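A hedged usage sketch of pile_layers (the layer-config dicts below are invented to match the keys the function reads, and `obj` stands for whatever instance hosts the method; neither is from the original code):

# Hypothetical configuration exercising the 'dense' branches of pile_layers.
layer_config = [
    {'type': 'dense', 'widthModifier': 2.0, 'activation': 'relu', 'dropout': 0.0},
    {'type': 'dense', 'widthModifier': 1.0, 'activation': 'lrelu', 'dropout': 0.2},
    {'type': 'dense', 'widthModifier': 0.5, 'activation': 'relu', 'dropout': 0.1},
]
model = obj.pile_layers(shape_size=64,
                        optimizer='adam',
                        loss='binary_crossentropy',
                        layers=layer_config,
                        last_layer_activator='sigmoid')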
Example #15
valid_generator = idg2.flow(x_val, y_val)

#2. MODEL
from tensorflow.keras.applications import EfficientNetB3
TF = EfficientNetB3(weights="imagenet",
                    include_top=False,
                    input_shape=image_size)
TF.trainable = True
x = TF.output

x = Conv2D(2048, 4, padding="SAME", activation='swish')(x)
x = GlobalAveragePooling2D()(x)

x = Flatten()(x)
x = Dense(2048, activation='relu')(x)
x = GaussianDropout(0.3)(x)

outputs = Dense(1000, activation='softmax')(x)
model = Model(inputs=TF.input, outputs=outputs)
model.summary()

#COMPILE
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['acc'])
mc = ModelCheckpoint('C:/data/MC/best_LT_vision2_1.hdf5',
                     save_best_only=True,
                     mode='auto')
es = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')
rl = ReduceLROnPlateau(monitor='val_loss',