Example #1
def inner_layer_fn():
    # conv_* settings and conv_kwargs come from the enclosing scope
    return Dense(conv_size,
                 activation=conv_activation,
                 use_bias=conv_bias,
                 kernel_regularizer=l1_l2(conv_l1, conv_l2),
                 bias_regularizer=l1_l2(conv_l1, conv_l2),
                 **conv_kwargs)
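A hypothetical usage sketch (assuming Dense and l1_l2 are imported as used above; the variable bindings below are illustrative, not from the original codebase). Each call to the factory builds a fresh Dense layer with its own regularizer instances, so it can be called repeatedly to stack identical layers:

from tensorflow.keras import Sequential

# Assumed bindings for the factory's free variables.
conv_size, conv_activation, conv_bias = 64, 'relu', True
conv_l1, conv_l2, conv_kwargs = 1e-5, 1e-4, {}

model = Sequential([inner_layer_fn(), inner_layer_fn()])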
Example #2
def basic_model_3(x_size, y_size):
    t_model = Sequential()
    t_model.add(
        Dense(80,
              activation="tanh",
              kernel_initializer='normal',
              input_shape=(x_size, )))
    t_model.add(Dropout(0.2))
    t_model.add(
        Dense(120,
              activation="relu",
              kernel_initializer='normal',
              kernel_regularizer=regularizers.l1(0.01),
              bias_regularizer=regularizers.l1(0.01)))
    t_model.add(Dropout(0.1))
    t_model.add(
        Dense(20,
              activation="relu",
              kernel_initializer='normal',
              kernel_regularizer=regularizers.l1_l2(0.01),
              bias_regularizer=regularizers.l1_l2(0.01)))
    t_model.add(Dropout(0.1))
    t_model.add(Dense(10, activation="relu", kernel_initializer='normal'))
    t_model.add(Dropout(0.0))
    t_model.add(Dense(y_size))

    t_model.compile(
        loss='mean_squared_error',
        # optimizer=tf.keras.optimizers.Nadam(lr=0.0005),
        optimizer=tf.keras.optimizers.RMSprop(0.001),
        metrics=[metrics.mae])
    return t_model
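A minimal usage sketch, assuming the imports basic_model_3 relies on (Sequential, Dense, Dropout, regularizers, metrics, tf) are in scope; the sizes and synthetic data are illustrative assumptions:

import numpy as np

model = basic_model_3(x_size=20, y_size=3)
x = np.random.rand(256, 20).astype('float32')
y = np.random.rand(256, 3).astype('float32')
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
print(model.evaluate(x, y, verbose=0))  # [mse loss, mae]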
Example #3
def create_model_meta(NUM_CLASS,
                      shape,
                      isPreTrained=False,
                      pathToMetaModelWeights=None,
                      isTrainable=True):
    initializer = GlorotNormal()
    inputs = Input(shape=shape)
    x = Dense(60,
              activation='relu',
              kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4),
              kernel_initializer=initializer)(inputs)
    x = Dense(30,
              activation='relu',
              kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4),
              kernel_initializer=initializer)(x)
    x = Dense(6,
              activation='relu',
              kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4),
              kernel_initializer=initializer,
              name='final_output')(x)
    output = Dense(2, activation='softmax')(x)
    model = Model(inputs, output)

    if not isPreTrained:
        return model
    else:
        model.load_weights(pathToMetaModelWeights)
        if not isTrainable:
            for layer in model.layers:
                layer.trainable = False
        return model, 1
Example #4
    def rnn_conv_block(self,
                       n_filter,
                       kernel_size,
                       strides,
                       x,
                       conv_type=Conv2D,
                       drop=True,
                       flatten=False,
                       batchnorm=True,
                       **kwargs):

        x = TD(
            conv_type(
                n_filter,
                kernel_size=kernel_size,
                strides=strides,
                use_bias=self.use_bias,
                padding=self.padding,
                kernel_regularizer=l1_l2(self.regularizer[0],
                                         self.regularizer[1]),
                bias_regularizer=l1_l2(self.regularizer[0],
                                       self.regularizer[1]),
            ), **kwargs)(x)

        if batchnorm:
            x = TD(BatchNormalization())(x)
        x = TD(Activation(self.prev_act))(x)
        if drop:
            x = TD(Dropout(self.drop_rate))(x)
        if flatten:
            x = TD(Flatten())(x)
        return x
Example #5
def trainCNN(train, test, nrows = 200, ncols = 2500, size_batch = 32):
    '''Train CNN on aligned haplotypes. Setup to train on nrows (haplotypes) X ncols (base pair) alignments'''
    import tensorflow as tf
    from tensorflow.keras import datasets, layers, models, regularizers
    import numpy as np
    tf.random.set_seed(123456)
    # Load data
    train_x, train_y = train
    test_x, test_y = test
    # Some more preprocessing
    train_x = train_x.reshape(train_x.shape + (1,))
    test_x = test_x.reshape(test_x.shape + (1,))
    train_y = np.array([1 if i == 'positive' else 0 for i in train_y])
    test_y = np.array([1 if i == 'positive' else 0 for i in test_y])
    # Early stopping
    callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
    # Initialize model
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu',
                            input_shape=(nrows, ncols, 1),
                            kernel_regularizer=regularizers.l1_l2(l1=0.008, l2=0.008)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu',
                            kernel_regularizer=regularizers.l1_l2(l1=0.006, l2=0.006)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu',
                            kernel_regularizer=regularizers.l1_l2(l1=0.002, l2=0.002)))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    METRICS = [
        tf.keras.metrics.BinaryAccuracy(name='accuracy'),
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.AUC(name='auc')
    ]
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=METRICS)
    if size_batch > 0:
        history = model.fit(train_x, train_y, batch_size=size_batch,
                            epochs=30, validation_data=(test_x, test_y),
                            callbacks=[callback])
    else:
        history = model.fit(train_x, train_y, epochs=25,
                            validation_data=(test_x, test_y),
                            callbacks=[callback])
    return (model, history)
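A toy invocation sketch; the alignment sizes and random data below are assumptions standing in for real haplotype alignments:

import numpy as np

nrows, ncols = 32, 64  # deliberately small toy alignments
train = (np.random.rand(64, nrows, ncols).astype('float32'),
         ['positive', 'negative'] * 32)
test = (np.random.rand(16, nrows, ncols).astype('float32'),
        ['positive', 'negative'] * 8)
model, history = trainCNN(train, test, nrows=nrows, ncols=ncols,
                          size_batch=8)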
Example #6
    def attention_lstm(self):
        input_x = Input(shape = self.input_shape, name = 'input')
        X = input_x
        
        for i in range(self.lstm_blocks):
            query = Dense(10, name='query_' + str(i))(X)
            key = Dense(10, name='key_' + str(i))(X)
            attention_weights = AdditiveAttention(use_scale = False, name='attention_'+str(i))([query, X, key])
            attention_weights = Dense(1, activation='softmax', name='attention_weights_'+str(i))(attention_weights)
            context = Multiply(name='context_'+str(i))([attention_weights,X])
            X = LSTM(self.n_units, return_sequences = True, 
                     recurrent_dropout=self.recurrent_dropout, 
                     kernel_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                     activity_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                     name = 'lstm_' + str(i))(context)
            if self.dropout_rate > 0:
                X = Dropout(self.dropout_rate, name='dropout_'+str(i))(X)
                
        X = LSTM(self.n_units, return_sequences = False, 
                 recurrent_dropout=self.recurrent_dropout, 
                 kernel_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                 activity_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                 name = 'lstm_last')(X)
        if self.dropout_rate > 0:
            X = Dropout(self.dropout_rate, name='dropout_last')(X)
        X = Dense(self.n_outputs, activation=self.activation, name = 'output')(X)

        return Model(inputs=input_x, outputs=X, name='attention_lstm')
Example #7
    def conv_block(self,
                   n_filter,
                   kernel_size,
                   strides,
                   x,
                   drop=False,
                   conv_type=Conv2D,
                   flatten=False,
                   batchnorm=False,
                   maxpool=False,
                   **kwargs):

        x = conv_type(n_filter,
                      kernel_size=kernel_size,
                      strides=strides,
                      use_bias=self.use_bias,
                      kernel_regularizer=l1_l2(self.regularizer[0],
                                               self.regularizer[1]),
                      bias_regularizer=l1_l2(self.regularizer[0],
                                             self.regularizer[1]),
                      **kwargs)(x)

        if batchnorm:
            x = BatchNormalization()(x)
        x = Activation(self.prev_act)(x)
        if drop:
            x = Dropout(self.drop_rate)(x)
        if maxpool:
            x = MaxPooling2D()(x)
        if flatten:
            x = Flatten()(x)
        return x
Example #8
def resnet_v1_eembc_tiny(input_shape=[32, 32, 3],
                         num_classes=10,
                         num_filters=[8],
                         kernel_sizes=[3, 1],
                         strides=[1, 2],
                         l1p=1e-4,
                         l2p=0):

    # Input layer, change kernel size to 7x7 and strides to 2 for an official resnet
    inputs = Input(shape=input_shape)
    x = Conv2D(num_filters[0],
               kernel_size=kernel_sizes[0],
               strides=strides[0],
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # First stack
    # Weight layers
    y = Conv2D(num_filters[0],
               kernel_size=kernel_sizes[0],
               strides=strides[1],
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv2D(num_filters[0],
               kernel_size=kernel_sizes[0],
               strides=strides[0],
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)
    y = BatchNormalization()(y)

    # Adjust for change in dimension due to stride in identity
    x = Conv2D(num_filters[0],
               kernel_size=kernel_sizes[1],
               strides=strides[1],
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)

    # Overall residual, connect weight layer and identity paths
    x = Add()([x, y])
    x = Activation('relu')(x)

    # Final classification layer.
    pool_size = int(np.amin(x.shape[1:3]))
    x = AveragePooling2D(pool_size=pool_size)(x)
    y = Flatten()(x)
    outputs = Dense(num_classes,
                    activation='softmax',
                    kernel_initializer='he_normal')(y)

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
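An instantiation sketch; the compile settings are assumptions, not part of the original function:

model = resnet_v1_eembc_tiny()  # defaults: 32x32x3 inputs, 10 classes
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()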
Example #9
def get_model():
    model = Sequential()
    model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                     kernel_initializer='he_uniform',
                     input_shape=(10, 240, 320, 3), padding="same",
                     kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4)))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(BatchNormalization(center=True, scale=True))
    model.add(Dropout(0.7))

    model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                     kernel_initializer='he_uniform',
                     kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(BatchNormalization(center=True, scale=True))
    model.add(Dropout(0.7))
    model.add(Flatten())

    model.add(Dense(128, activation='relu', kernel_initializer='he_uniform',
        kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4)))
    model.add(Dropout(0.2))

    model.add(Dense(50, activation='softmax'))

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])
    model.summary()
    # plot_model(model, to_file='images/baseline.png', show_shapes=True, show_layer_names=True)

    return model
Example #10
def trainwithfit(lr, reg_coef):
    # Relies on module-level context_size, vec_length, MyDataset and
    # tensorboard_callback being defined elsewhere.
    dl_model = keras.Sequential([
        layers.Conv1D(32,
                      context_size,
                      input_shape=(1500, vec_length),
                      kernel_regularizer=regularizers.l1_l2(l1=reg_coef,
                                                            l2=reg_coef)),
        layers.GlobalMaxPooling1D(data_format="channels_first"),
        layers.Dense(32,
                     kernel_regularizer=regularizers.l1_l2(l1=reg_coef,
                                                           l2=reg_coef)),
        layers.Dense(2,
                     activation='softmax',
                     kernel_regularizer=regularizers.l1_l2(l1=reg_coef,
                                                           l2=reg_coef))
    ])

    dl_model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=lr),
        loss='binary_crossentropy',
        metrics=['binary_accuracy'],
    )

    generatorTrain = MyDataset("trainData")
    history = dl_model.fit(x=generatorTrain,
                           verbose=2,
                           epochs=2,
                           callbacks=[tensorboard_callback])

    generatorTest = MyDataset("testData")
    res = dl_model.evaluate(x=generatorTest)
    return (dl_model, res)
Example #11
    def __init__(self,
                 embedding_dim,
                 units,
                 vocab_size,
                 p_dropout = 0,
                 l1_reg = 0,
                 l2_reg = 0):

        super(RNN_Decoder, self).__init__()
        self.units = units

        self.embedding = Embedding(vocab_size, embedding_dim)
        self.gru = GRU(self.units,
                       return_sequences = True,
                       return_state = True,
                       recurrent_initializer = 'glorot_uniform',
                       dropout = p_dropout,
                       # recurrent_dropout = p_dropout,
                       kernel_regularizer = l1_l2(l1_reg, l2_reg))
        self.fc1 = Dense(self.units, kernel_regularizer=l1_l2(l1_reg, l2_reg))
        self.fc2 = Dense(vocab_size, kernel_regularizer=l1_l2(l1_reg, l2_reg))

        self.attention = BahdanauAttention(units=units,
                                           p_dropout = p_dropout,
                                           l1_reg = l1_reg,
                                           l2_reg = l2_reg)
        self.dropout = Dropout(p_dropout)
Example #12
def fitnessLearnModel(max_len,
                      features,
                      lr,
                      cells=32,
                      regularization_base=2e-6):
    inp = Input(shape=(max_len, features), name='fitnessModel_inputs1')
    inp2 = Input(shape=(max_len, features), name='fitnessModel_inputs2')

    mult = Multiply()([inp, inp2])

    mask = Masking(0.0)(mult)
    lstm_Layer = LSTM(cells,
                      activation='relu',
                      return_sequences=False,
                      kernel_initializer=he_normal(24353),
                      name='fitnessModel_lstm1',
                      return_state=False,
                      recurrent_regularizer=l1_l2(regularization_base / 20,
                                                  regularization_base / 20),
                      kernel_regularizer=l1_l2(regularization_base,
                                               regularization_base),
                      bias_regularizer=l1_l2(regularization_base * 2,
                                             regularization_base * 2))

    lstm_out = lstm_Layer(mask)
    out = Dense(1,
                activation='linear',
                kernel_initializer=he_normal(53436),
                name='fitnessModel_denseOut')(lstm_out)

    fitnessModel = Model(inputs=[inp, inp2], outputs=out)
    fitnessModel.compile(optimizer=Adam(lr, clipnorm=1.0, clipvalue=0.5),
                         loss='mse')
    return fitnessModel
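A sketch of driving the two-input model (batch size and shapes are assumptions):

import numpy as np

model = fitnessLearnModel(max_len=50, features=8, lr=1e-3)
seq_a = np.random.rand(4, 50, 8).astype('float32')
seq_b = np.random.rand(4, 50, 8).astype('float32')
print(model.predict([seq_a, seq_b]).shape)  # (4, 1)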
Example #13
def convolutional_block(X, f, filters, stage, block, s=2, l1=0.0, l2=0.01):

    # Defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value
    X_shortcut = X

    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(s, s),
               data_format='channels_first',
               padding='same',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0),
               activity_regularizer=l1_l2(l1, l2))(X)
    X = BatchNormalization(axis=1, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               data_format='channels_first',
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0),
               activity_regularizer=l1_l2(l1, l2))(X)
    X = BatchNormalization(axis=1, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               data_format='channels_first',
               padding='same',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0),
               activity_regularizer=l1_l2(l1, l2))(X)
    X = BatchNormalization(axis=1, name=bn_name_base + '2c')(X)

    X_shortcut = Conv2D(filters=F3,
                        kernel_size=(1, 1),
                        strides=(s, s),
                        data_format='channels_first',
                        padding='same',
                        name=conv_name_base + '1',
                        kernel_initializer=glorot_uniform(seed=0),
                        activity_regularizer=l1_l2(l1, l2))(X_shortcut)
    X_shortcut = BatchNormalization(axis=1,
                                    name=bn_name_base + '1')(X_shortcut)

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
Example #14
def get_compiled_model():

    # shape=(batch_size, time_steps, channels, row, col)
    input_flir = Input(shape=(6, 3, 480, 640))

    input_bottom = Input(shape=(6, 3, 480, 640))

    input_top = Input(shape=(6, 3, 480, 640))

    x_concat = Concatenate(axis=-1)([input_flir, input_bottom, input_top])
    x_concat = TimeDistributed(
        SeparableConv2D(8, (4, 4), activation="relu",
                        padding="same"))(x_concat)
    x_ConvLSTM2D = ConvLSTM2D(32, (6, 6),
                              padding="same",
                              kernel_regularizer=regularizers.l1_l2(l1=1e-5,
                                                                    l2=1e-4),
                              dropout=0.1,
                              recurrent_dropout=0.1,
                              return_sequences=True)(x_concat)
    x_ConvLSTM2D = BatchNormalization()(x_ConvLSTM2D)
    x_ConvLSTM2D = ConvLSTM2D(64, (4, 4),
                              padding="same",
                              kernel_regularizer=regularizers.l1_l2(l1=1e-5,
                                                                    l2=1e-4),
                              dropout=0.1,
                              recurrent_dropout=0.1,
                              return_sequences=False)(x_ConvLSTM2D)
    x_ConvLSTM2D = BatchNormalization()(x_ConvLSTM2D)

    x_flat = GlobalAveragePooling2D()(x_ConvLSTM2D)
    x_flat = Dropout(.2)(x_flat)
    yh = Dense(3,
               activation="softmax",
               kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4))(x_flat)

    model = Model([input_flir, input_bottom, input_top], yh)

    opt = SGD(lr=1e-4, momentum=0.9, decay=1e-4)
    model.compile(loss=categorical_crossentropy,
                  optimizer=opt,
                  metrics=["accuracy"])
    return model
Example #15
def generate_model_2D(_input_shape):

    model = Sequential()

    model.add(Conv2D(140, (3, 3), input_shape=_input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(70, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(20, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Activation('relu'))

    model.add(
        Dense(256,
              kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
              bias_regularizer=regularizers.l2(1e-4),
              activity_regularizer=regularizers.l2(1e-5)))

    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Activation('relu'))

    model.add(
        Dense(64,
              kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
              bias_regularizer=regularizers.l2(1e-4),
              activity_regularizer=regularizers.l2(1e-5)))

    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Activation('relu'))

    model.add(
        Dense(20,
              kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
              bias_regularizer=regularizers.l2(1e-4),
              activity_regularizer=regularizers.l2(1e-5)))

    model.add(Dense(2))
    model.add(Activation('softmax'))

    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        #metrics=['accuracy', metrics.auc])
        metrics=['accuracy'])

    return model
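An instantiation sketch; the input shape is an assumption:

model = generate_model_2D((64, 64, 1))
model.summary()  # ends in a 2-unit softmax trained with binary_crossentropy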
Example #16
def EEGNet_old(nb_classes, Chans = 64, Samples = 128, regRate = 0.0001,
           dropoutRate = 0.25, kernels = [(2, 32), (8, 4)], strides = (2, 4)):
    """ Keras Implementation of EEGNet_v1 (https://arxiv.org/abs/1611.08024v2)

    This model is the original EEGNet model proposed on arxiv
            https://arxiv.org/abs/1611.08024v2
    
    with a few modifications: we use striding instead of max-pooling as this 
    helped slightly in classification performance while also providing a 
    computational speed-up. 
    
    Note that we no longer recommend the use of this architecture, as the new
    version of EEGNet performs much better overall and has nicer properties.
    
    Inputs:
        
        nb_classes     : total number of final categories
        Chans, Samples : number of EEG channels and samples, respectively
        regRate        : regularization rate for L1 and L2 regularizations
        dropoutRate    : dropout fraction
        kernels        : the 2nd and 3rd layer kernel dimensions (default is 
                         the [2, 32] x [8, 4] configuration)
        strides        : the stride size (note that this replaces the max-pool
                         used in the original paper)
    
    """

    # start the model
    input_main   = Input((1, Chans, Samples))
    layer1       = Conv2D(16, (Chans, 1), input_shape=(1, Chans, Samples),
                                 kernel_regularizer = l1_l2(l1=regRate, l2=regRate))(input_main)
    layer1       = BatchNormalization(axis=1)(layer1)
    layer1       = Activation('elu')(layer1)
    layer1       = Dropout(dropoutRate)(layer1)
    
    permute_dims = 2, 1, 3
    permute1     = Permute(permute_dims)(layer1)
    
    layer2       = Conv2D(4, kernels[0], padding = 'same', 
                            kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                            strides = strides)(permute1)
    layer2       = BatchNormalization(axis=1)(layer2)
    layer2       = Activation('elu')(layer2)
    layer2       = Dropout(dropoutRate)(layer2)
    
    layer3       = Conv2D(4, kernels[1], padding = 'same',
                            kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                            strides = strides)(layer2)
    layer3       = BatchNormalization(axis=1)(layer3)
    layer3       = Activation('elu')(layer3)
    layer3       = Dropout(dropoutRate)(layer3)
    
    flatten      = Flatten(name = 'flatten')(layer3)
    
    dense        = Dense(nb_classes, name = 'dense')(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input_main, outputs=softmax)
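A hedged instantiation sketch. Input((1, Chans, Samples)) together with BatchNormalization(axis=1) implies channels-first tensors, so the backend data format is set accordingly; the compile arguments are assumptions:

import tensorflow as tf

tf.keras.backend.set_image_data_format('channels_first')
model = EEGNet_old(nb_classes=4, Chans=64, Samples=128)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()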
Example #17
def ResNet(chans=64, samples=121, l1=0.0, l2=0.01):
    input_shape = (1, chans, samples)
    input1 = Input(shape=input_shape)

    X = Conv2D(128, (1, 64),
               padding='same',
               input_shape=input_shape,
               data_format='channels_first',
               kernel_initializer=glorot_uniform(seed=0),
               activity_regularizer=l1_l2(l1, l2))(input1)
    X = BatchNormalization(axis=1)(X)
    X = DepthwiseConv2D((64, 1),
                        kernel_initializer=glorot_uniform(seed=0),
                        depth_multiplier=2,
                        data_format='channels_first',
                        activity_regularizer=l1_l2(l1, l2))(X)
    X = BatchNormalization(axis=1)(X)
    X = Activation('elu')(X)
    X = AveragePooling2D((1, 2), data_format='channels_first')(X)

    X = convolutional_block(X,
                            f=3,
                            filters=[64, 64, 256],
                            stage=2,
                            block='a',
                            s=1,
                            l1=l1,
                            l2=l2)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b', l1=l1, l2=l2)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c', l1=l1, l2=l2)

    X = convolutional_block(X,
                            f=3,
                            filters=[128, 128, 512],
                            stage=3,
                            block='a',
                            s=1,
                            l1=l1,
                            l2=l2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b', l1=l1, l2=l2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c', l1=l1, l2=l2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d', l1=l1, l2=l2)

    X = AveragePooling2D(pool_size=(2, 2), padding='same')(X)

    X = Flatten()(X)
    X = Dense(64, activation='relu')(X)
    X = Dense(32, activation='relu')(X)
    X = Dense(1,
              activation='sigmoid',
              name='fc1',
              kernel_initializer=glorot_uniform(seed=0))(X)

    model = Model(inputs=input1, outputs=X, name='ResNet50')

    return model
Example #18
def encoder_decoder2(
    latent_dim1, n_outputs, n_timesteps, n_features, l1, l2,
):
    model = tf.keras.Sequential()

    model.add(
        GRU(
            latent_dim1,
            activation="relu",
            return_sequences=True,
            recurrent_regularizer=l1_l2(0, 0),
            kernel_regularizer=l1_l2(l1=l1, l2=l2),
            dropout=0.0,
            recurrent_dropout=0.0,
            input_shape=(n_timesteps, n_features),
        )
    )
    model.add(Reshape((n_timesteps * latent_dim1,)))
    model.add(RepeatVector(n_outputs))
    model.add(Reshape((n_outputs, n_timesteps, latent_dim1)))
    model.add(Dropout(0.1))
    model.add(
        TimeDistributed(
            GRU(
                latent_dim1,
                activation="relu",
                # return_sequences=True,
                kernel_regularizer=l1_l2(l1=l1, l2=l2),
                dropout=0.0,
                recurrent_dropout=0.0,
            )
        )
    )
    model.add(
        TimeDistributed(
            Dense(10, activation="relu",
                  kernel_regularizer=l1_l2(l1=0, l2=0))))

    # model.add(
    #     Dense(
    #         10,
    #         activation="relu",
    #         kernel_regularizer=l1_l2(l1=l1, l2=l2),
    #     )
    # )
    # model.add(
    #     Dense(
    #         10,
    #         activation="relu",
    #         kernel_regularizer=l1_l2(l1=l1, l2=l2),
    #     )
    # )
    model.add(TimeDistributed(Dense(1)))
    return model
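A construction sketch; the dimension choices are illustrative:

model = encoder_decoder2(latent_dim1=16, n_outputs=3,
                         n_timesteps=12, n_features=5,
                         l1=1e-5, l2=1e-4)
model.summary()  # final output shape: (None, 3, 1)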
Example #19
def get_attention(units,
                  lstm_units,
                  n_layers_att,
                  p_dropout=0,
                  l1_reg=0,
                  l2_reg=0):

    # W1 = Dense(units, kernel_regularizer=l1_l2(l1_reg, l2_reg), name = 'W_feats')
    W1 = [
        Dense(units, activation='tanh', name='W_feats_{}'.format(i))
        for i in range(n_layers_att)
    ]
    W2 = Dense(units,
               kernel_regularizer=l1_l2(l1_reg, l2_reg),
               name='W_hidden')
    V = Dense(1, kernel_regularizer=l1_l2(l1_reg, l2_reg), name='V')
    f_beta = Dense(1, kernel_regularizer=l1_l2(l1_reg, l2_reg), name='f_beta')
    dropout = Dropout(p_dropout, name='dropout')

    # shape = (batch, attn_features, features_shape);
    # feature_vector_shape is expected to be defined at module level
    encoder_output = Input(feature_vector_shape, name='image_features')
    # shape = (batch, lstm_units)
    hidden_last = Input(lstm_units, name='last_hidden_state')

    projected_features = dropout(W1[0](encoder_output))

    for i in range(1, n_layers_att):
        projected_features = dropout(W1[i](projected_features))

    # shape = (batch, attn_features, 1)
    score = V(tanh(projected_features +
                   dropout(W2(tf.expand_dims(hidden_last, axis=1)))))
    # shape = (batch, attn_features)
    score = dropout(tf.reduce_sum(score, axis=2))
    attention_weights = Activation('softmax', dtype='float32')(score)

    # beta = f_beta(dropout(tf.expand_dims(hidden_last, axis = 1)))
    # shape = (batch, 1)
    beta = f_beta(hidden_last)
    beta = Activation('sigmoid', dtype='float32')(beta)

    # shape = (batch, attn_features, features_shape)
    context_vector = tf.expand_dims(attention_weights, axis=2) * encoder_output
    # shape = (batch, features_shape)
    context_vector = beta * tf.reduce_sum(context_vector, axis=1)
    context_vector = Activation('linear', dtype='float32')(context_vector)

    return Model(inputs=[encoder_output, hidden_last],
                 outputs=[context_vector, attention_weights],
                 name='attention')
Example #20
def identity_block(X, f, filters, stage, block, l1=0.0, l2=0.01):

    # Defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value
    X_shortcut = X

    # First component of main path
    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               data_format='channels_first',
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0),
               activity_regularizer=l1_l2(l1, l2))(X)
    X = BatchNormalization(axis=1, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               data_format='channels_first',
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0),
               activity_regularizer=l1_l2(l1, l2))(X)
    X = BatchNormalization(axis=1, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               data_format='channels_first',
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0),
               activity_regularizer=l1_l2(l1, l2))(X)
    X = BatchNormalization(axis=1, name=bn_name_base + '2c')(X)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
Example #21
def residual_unit(x, activation, n_weights):
    res = x

    # first pre-activation block
    out = BatchNormalization()(x)
    out = activation(out)
    out = Dense(n_weights, activation=None,
                kernel_regularizer=l1_l2(1e-4, 1e-4))(out)

    # second pre-activation block, applied to the first block's output
    out = BatchNormalization()(out)
    out = activation(out)
    out = Dense(n_weights, activation=None,
                kernel_regularizer=l1_l2(1e-4, 1e-4))(out)

    out = add([res, out])
    return out
Example #22
    def test_constructor(self):
        ConditionalRNN(5,
                       cell='GRU',
                       kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4))
        ConditionalRNN(5,
                       cell='LSTM',
                       kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4))
        ConditionalRNN(5,
                       cell='RNN',
                       kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4))
        ConditionalRNN(5, cell='RNN', return_sequences=True)
        ConditionalRNN(5, cell='LSTM', return_sequences=True)
        ConditionalRNN(5, cell='GRU', return_sequences=True)
Example #23
    def build_model(self, n_features, n_labels):
        """
        The method builds a new member of the ensemble and returns it.

        :type n_features: int
        :param n_features: The number of features.

        :type n_labels: int
        :param n_labels: The number of labels.
        """

        # initialize optimizer and early stopping
        self.optimizer = Adam(lr=self.hyperparameters['lr'], beta_1=0.9,
                              beta_2=0.999, epsilon=None, decay=0.,
                              amsgrad=False)
        self.es = EarlyStopping(monitor='val_mean_squared_error',
                                min_delta=0.0, patience=self.patience,
                                verbose=1, mode='min',
                                restore_best_weights=True)

        inputs = Input(shape=(n_features,))
        h = GaussianNoise(self.hyperparameters['noise'])(inputs)

        # the encoder part
        for i in range(self.bottelneck_layer):
            h = Dense(self.hyperparameters['neurons'][i], activation='relu',
                      kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_hidden'],
                                                            self.hyperparameters['l2_hidden']))(h)
            h = Dropout(self.hyperparameters['dropout'])(h)

        latent = Dense(self.hyperparameters['neurons'][self.bottelneck_layer], activation='linear',
                      kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_hidden'],
                                                            self.hyperparameters['l2_hidden']))(h)

        encoder = Model(inputs=inputs, outputs=latent, name='encoder')

        # the decoder part
        latent_inputs = Input(shape=(self.hyperparameters['neurons'][self.bottelneck_layer],))
        h = GaussianNoise(0.0)(latent_inputs)

        for i in range(self.bottelneck_layer + 1, self.n_hidden_layers - 1):
            h = Dense(self.hyperparameters['neurons'][i], activation='relu',
                      kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_hidden'],
                                                            self.hyperparameters['l2_hidden']))(h)
            h = Dropout(self.hyperparameters['dropout'])(h)

        decoded = Dense(n_labels, activation='linear',
                        kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_out'],
                                                              self.hyperparameters['l2_out']))(h)

        decoder = Model(inputs=latent_inputs, outputs=decoded, name='decoder')

        # encoder-decoder model
        encoder_decoder = Model(inputs, decoder(encoder(inputs)), name='encoder_decoder')
        return encoder_decoder, encoder, decoder
Example #24
def Build_model(news_input_shape, num_classes):
    model = Sequential()
    #model.add(AveragePooling1D(pool_size=3, strides=1, input_shape=(news_input_shape.shape[1], 1))

    model.add(
        Conv1D(filters=1,
               kernel_size=1,
               name='gaa',
               input_shape=(news_input_shape.shape[1], 1)))
    model.add(Conv1D(filters=4, kernel_size=4, name='0_conv_conc1'))
    model.add(Conv1D(filters=5, kernel_size=3, name='0_conv_conc2'))
    model.add(Conv1D(filters=1, kernel_size=1, name='0_conv_conc3'))
    model.add(Flatten())

    model.add(Dense(units=320, activation='relu', name='dense_layer_200'))
    model.add(Reshape(target_shape=(320, 1)))
    model.add(Conv1D(filters=3, kernel_size=3, name='1_conv_conc'))
    model.add(Conv1D(filters=3, kernel_size=3, name='2_conv_conc'))
    model.add(MaxPooling1D(pool_size=3, strides=1, name='first_avgPool'))
    model.add(Conv1D(filters=3, kernel_size=3, name='2_conv_sconc'))
    model.add(MaxPooling1D(pool_size=3, strides=1, name='first_avgsssPsool'))
    model.add(Flatten())

    model.add(Dropout(rate=0.3))
    model.add(Dense(units=180, activation='relu', name="1_dense_layer"))
    model.add(Reshape(target_shape=(180, 1)))
    model.add(MaxPooling1D(pool_size=3, strides=1, name='first_avgssPosol'))
    model.add(Conv1D(filters=3, kernel_size=2))
    model.add(Conv1D(filters=5, kernel_size=5))
    model.add(MaxPooling1D(pool_size=3, strides=1, name='first_avgPosol1'))
    model.add(Flatten())

    model.add(
        Dense(units=120,
              activation='relu',
              kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4)))
    model.add(
        Dense(units=num_classes,
              activation='relu',
              name="2_dense_layer",
              kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
              bias_regularizer=regularizers.l2(1e-4),
              activity_regularizer=regularizers.l2(1e-5)))
    #model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    #model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.compile(optimizer='rmsprop',
                  loss='mean_squared_error',
                  metrics=['accuracy'])

    model.summary()
    return model
Example #25
def get_encoder(node_num, d, K, n_units, nu1, nu2, activation_fn):
    # Input
    x = Input(shape=(node_num,))
    # Encoder layers
    y = [None] * (K + 1)
    y[0] = x  # y[0] is assigned the input
    for i in range(K - 1):
        y[i + 1] = Dense(n_units[i], activation=activation_fn,
                         kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[i])
    y[K] = Dense(d, activation=activation_fn,
                 kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[K - 1])
    # Encoder model
    encoder = Model(inputs=x, outputs=y[K])
    return encoder
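A sketch of building an encoder with this helper (sizes are assumptions; Reg is the keras.regularizers alias the snippet uses):

# K=3 encoder layers: two hidden widths from n_units, then the
# d-dimensional embedding, all with l1_l2 kernel regularization.
encoder = get_encoder(node_num=100, d=16, K=3, n_units=[64, 32],
                      nu1=1e-5, nu2=1e-4, activation_fn='relu')
encoder.summary()  # maps (None, 100) -> (None, 16)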
Example #26
    def build_model(self, n_features):
        """
        The method builds a new member of the ensemble and returns it.
        """
        # derived parameters
        self.hyperparameters['n_members'] = self.hyperparameters[
            'n_segments'] * self.hyperparameters['n_members_segment']

        # initialize optimizer and early stopping
        self.optimizer = Adam(lr=self.hyperparameters['lr'],
                              beta_1=0.9,
                              beta_2=0.999,
                              epsilon=None,
                              decay=0.,
                              amsgrad=False)
        self.es = EarlyStopping(monitor=f'val_{self.loss_name}',
                                min_delta=0.0,
                                patience=self.hyperparameters['patience'],
                                verbose=1,
                                mode='min',
                                restore_best_weights=True)

        inputs = Input(shape=(n_features, ))
        h = GaussianNoise(self.hyperparameters['noise_in'],
                          name='noise_input')(inputs)

        for i in range(self.hyperparameters['layers']):
            h = Dense(self.hyperparameters['neurons'],
                      activation='tanh',
                      kernel_regularizer=regularizers.l1_l2(
                          self.hyperparameters['l1_hidden'],
                          self.hyperparameters['l2_hidden']),
                      kernel_initializer='random_uniform',
                      bias_initializer='random_uniform',
                      name=f'hidden_{i}')(h)

            h = Dropout(self.hyperparameters['dropout'],
                        name=f'hidden_dropout_{i}')(h)

        out = Dense(self.n_outputs,
                    activation='softmax',
                    kernel_regularizer=regularizers.l1_l2(
                        self.hyperparameters['l1_out'],
                        self.hyperparameters['l2_out']),
                    kernel_initializer='random_uniform',
                    bias_initializer='random_uniform',
                    name='output')(h)

        model = Model(inputs=inputs, outputs=out)
        return model
Example #27
    def build(self, input_shape) -> None:
        self.embedding: Embedding = Embedding(input_dim=self.vocabulary_size,
                                              output_dim=self.embedding_size,
                                              input_length=self.sentence_len,
                                              trainable=True)
        self.conv_1: Conv1D = Conv1D(filters=self.conv_filter,
                                     kernel_size=3,
                                     activation="relu",
                                     name="conv_1")
        self.conv_2: Conv1D = Conv1D(filters=self.conv_filter,
                                     kernel_size=4,
                                     activation="relu",
                                     name="conv_2")
        self.conv_3: Conv1D = Conv1D(filters=self.conv_filter,
                                     kernel_size=5,
                                     activation="relu",
                                     name="conv_3")

        if not self.global_max_pool:
            self.pool_1: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                               strides=1,
                                               name="pool_1")
            self.pool_2: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                               strides=1,
                                               name="pool_2")
            self.pool_3: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                               strides=1,
                                               name="pool_3")
        else:
            self.pool_1: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_1")
            self.pool_2: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_2")
            self.pool_3: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_3")

        self.concatenate: Concatenate = Concatenate(axis=1)
        self.flatten: Flatten = Flatten()

        self.dropout_1: Dropout = Dropout(self.drop_rate, name="dropout_1")
        self.dense1 = Dense(self.dense_size,
                            activation="sigmoid",
                            kernel_regularizer=regularizers.l1_l2(
                                self.l1_regularization,
                                self.l2_regularization))
        self.dropout_2: Dropout = Dropout(self.drop_rate, name="dropout_2")
        self.dense: Dense = Dense(self.class_num,
                                  activation="softmax",
                                  kernel_regularizer=regularizers.l1_l2(
                                      self.l1_regularization,
                                      self.l2_regularization))
        super(TextCNN, self).build(input_shape)
Example #28
def Model(learn_rate=0.01, L1=0, L2=0, fc=4096, mm=0.9):
    #    K.clear_session()
    model = Sequential()
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               input_shape=(new_height, new_width, ch)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    print(model.output_shape)
    model.add(
        Dense(fc,
              activation='relu',
              kernel_regularizer=regularizers.l1_l2(l1=L1, l2=L2)))
    model.add(
        Dense(fc,
              activation='relu',
              kernel_regularizer=regularizers.l1_l2(l1=L1, l2=L2)))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=SGD(lr=learn_rate, momentum=mm),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
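Calling this builder requires the module-level input geometry it reads; the bindings below are assumptions:

new_height, new_width, ch = 224, 224, 3  # assumed input geometry
model = Model(learn_rate=0.01, L1=1e-5, L2=1e-4, fc=512)
model.summary()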
Example #29
    def introduce_metadata(path, i):
        if dynamic_input_shapes:
            metadata_input_ = metadata_path = Input(
                shape=(None, None, None, metadata_sizes[i][-1] if isinstance(
                    metadata_sizes[i], tuple) else metadata_sizes[i]))

        else:
            metadata_input_ = metadata_path = Input(
                shape=metadata_sizes[i] if isinstance(metadata_sizes[i], tuple)
                else (1, 1, 1, metadata_sizes[i]))

        metadata_inputs[i] = metadata_input_
        for j, (m_n_f, m_d) in enumerate(
                zip(metadata_number_features[i], metadata_dropout[i])):
            metadata_path = Dropout(m_d)(metadata_path)
            metadata_path = Conv3D(filters=m_n_f,
                                   kernel_size=(1, 1, 1),
                                   padding=padding,
                                   kernel_initializer=kernel_initializer,
                                   kernel_regularizer=regularizers.l1_l2(
                                       l1_reg, l2_reg))(metadata_path)
            metadata_path = activation_function("m{}_activation{}".format(
                i, j))(metadata_path)

        if not isinstance(metadata_sizes[i], tuple):
            broadcast_shape = K.concatenate([
                K.constant([1], dtype="int32"),
                K.shape(path)[1:-1],
                K.constant([1], dtype="int32")
            ])
            metadata_path = K.tile(metadata_path, broadcast_shape)

        return Concatenate(axis=-1)([path, metadata_path])
Example #30
    def retrieve_extra_output(path, i):
        extra_output_path = path
        for j, (e_o_k_s, e_o_n_f, e_o_d) in enumerate(
                zip(extra_output_kernel_sizes[i],
                    extra_output_number_features[i], extra_output_dropout[i])):
            extra_output_path = Dropout(e_o_d)(extra_output_path)
            extra_output_path = Conv3D(
                filters=e_o_n_f,
                kernel_size=e_o_k_s,
                activation=extra_output_activation_final_layer[i] if j +
                1 == len(extra_output_number_features[i]) else None,
                padding=padding,
                kernel_initializer=kernel_initializer,
                kernel_regularizer=regularizers.l1_l2(l1_reg, l2_reg),
                name=f"s{i + 1}" if j +
                1 == len(extra_output_number_features[i]) else
                None)(extra_output_path)
            if j + 1 < len(extra_output_number_features[i]):
                if (batch_normalization or instance_normalization
                    ) and not relaxed_normalization_scheme:
                    extra_output_path = normalization_function()(
                        extra_output_path)

                extra_output_path = activation_function(
                    "eo{}_activation{}".format(i, j))(extra_output_path)

        return extra_output_path