Example #1
def f(_input):
    # nb_filter, nb_row, nb_col, subsample and border_mode are free
    # variables captured from an enclosing factory function.
    conv = Conv2D(filters=nb_filter,
                  kernel_size=(nb_row, nb_col),
                  strides=subsample,
                  padding=border_mode,
                  kernel_initializer="he_normal")(_input)
    norm = BatchNormalization(axis=1)(conv)
    return ELU()(norm)
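Note that f is a closure: the hyperparameters come from an enclosing factory, a common keras-resnet idiom. A minimal sketch of such a factory, with a hypothetical name (_conv_bn_elu is an assumption, not from the source):

from keras.layers import BatchNormalization, Conv2D, ELU

def _conv_bn_elu(nb_filter, nb_row, nb_col, subsample=(1, 1), border_mode="same"):
    # Hypothetical factory: bind the hyperparameters, return the block above.
    def f(_input):
        conv = Conv2D(nb_filter, (nb_row, nb_col), strides=subsample,
                      padding=border_mode, kernel_initializer="he_normal")(_input)
        return ELU()(BatchNormalization(axis=1)(conv))
    return f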
Example #2
def conv_block(inputs, filters, init, n):
    m = inputs
    acti_layer = lambda x: ELU()(x)
    for i in range(n):
        # ELU after every conv except the last, which gets PReLU.
        if i == n - 1:
            acti_layer = lambda x: PReLU()(x)
        m = Conv2D(filters, (3, 3), kernel_initializer=init, padding='same')(m)
        m = BatchNormalization()(m)
        m = acti_layer(m)
    return m
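A usage sketch for conv_block; the input shape and filter count are illustrative, not from the source:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64, 64, 3))   # assumed channels-last input
out = conv_block(inp, filters=32, init='he_normal', n=3)
model = Model(inputs=inp, outputs=out)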
Example #3
 def my_activation(self, name):
     if name == 'prelu':
         return PReLU()
     elif name == 'lrelu':
         return LeakyReLU()
     elif name == 'elu':
         return ELU()
     else:
         return Activation(name)
Example #4
def residual_block(input, num_feature_maps, filter_size=3):
    # Pre-activation residual block (legacy Keras 1.x API:
    # BatchNormalization(mode=...), Convolution2D, merge).
    conv_1 = BatchNormalization(axis=1, mode=2)(input)
    conv_1 = ELU()(conv_1)
    conv_1 = Convolution2D(num_feature_maps,
                           filter_size,
                           filter_size,
                           border_mode='same',
                           bias=True)(conv_1)

    conv_2 = BatchNormalization(axis=1, mode=2)(conv_1)
    conv_2 = ELU()(conv_2)
    conv_2 = Convolution2D(num_feature_maps,
                           filter_size,
                           filter_size,
                           border_mode='same',
                           bias=True)(conv_2)

    return merge([input, conv_2], mode='sum')
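merge(..., mode='sum') and BatchNormalization(mode=...) are Keras 1.x APIs that were removed in Keras 2. A sketch of the same pre-activation residual block under Keras 2, an equivalence assumed here rather than taken from the source:

from keras.layers import Add, BatchNormalization, Conv2D, ELU

def residual_block_v2(inputs, num_feature_maps, filter_size=3):
    x = BatchNormalization(axis=1)(inputs)
    x = ELU()(x)
    x = Conv2D(num_feature_maps, (filter_size, filter_size),
               padding='same', use_bias=True)(x)
    x = BatchNormalization(axis=1)(x)
    x = ELU()(x)
    x = Conv2D(num_feature_maps, (filter_size, filter_size),
               padding='same', use_bias=True)(x)
    return Add()([inputs, x])  # element-wise sum, replacing merge(mode='sum')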
Example #5
 def f(input):
     # Keras 1.x variant of Example #1; nb_filter, nb_row, nb_col and
     # subsample are free variables from an enclosing scope.
     conv = Convolution2D(nb_filter=nb_filter,
                          nb_row=nb_row,
                          nb_col=nb_col,
                          subsample=subsample,
                          init="he_normal",
                          border_mode="same")(input)
     norm = BatchNormalization()(conv)
     return ELU()(norm)
Example #6
def nn():
    inp = Input(shape=(11,))
    out = Dense(10)(inp)
    out = ELU()(out)
    out = Dense(1)(out)
    model = Model(inputs=inp, outputs=out)
    model.compile(loss='mae',
                  optimizer=Adam())
    return model
Example #7
 def f(input):
     # init="he_normal" in Keras 1.x corresponds to kernel_initializer here.
     conv = Conv2D(filters=nb_filter,
                   kernel_size=(nb_row, nb_col),
                   strides=strides,
                   kernel_initializer="he_normal",
                   padding='same')(input)
     norm = BatchNormalization()(conv)
     return ELU()(norm)
Example #8
def create_activation(name):
  if name.lower() == 'LeakyReLU'.lower():
    return LeakyReLU()
  elif name.lower() == 'PReLU'.lower():
    return PReLU()
  elif name.lower() == 'ELU'.lower():
    return ELU()
  else:
    return Activation(name)
Example #9
    def inception_block(inputs, depth, splitted=False, activation='relu'):

        assert depth % 16 == 0
        # Map the activation name to a layer factory.
        if activation == 'relu':
            actv = lambda: LeakyReLU(0.0)  # alpha=0 behaves like plain ReLU
        elif activation == 'elu':
            actv = lambda: ELU(1.0)
        else:
            raise ValueError("activation must be 'relu' or 'elu'")

        c1_1 = Conv2D(depth // 4, (1, 1),
                      kernel_initializer='he_normal',
                      padding='same')(inputs)

        c2_1 = Conv2D(depth // 8 * 3, (1, 1),
                      kernel_initializer='he_normal',
                      padding='same')(inputs)
        c2_1 = actv()(c2_1)
        if splitted:
            c2_2 = Conv2D(depth // 2, (1, 3),
                          kernel_initializer='he_normal',
                          padding='same')(c2_1)
            c2_2 = BatchNormalization(axis=1)(c2_2)
            c2_2 = actv()(c2_2)
            c2_3 = Conv2D(depth // 2, (3, 1),
                          kernel_initializer='he_normal',
                          padding='same')(c2_2)
        else:
            c2_3 = Conv2D(depth // 2, (3, 3),
                          kernel_initializer='he_normal',
                          padding='same')(c2_1)

        c3_1 = Conv2D(depth // 16, (1, 1),
                      kernel_initializer='he_normal',
                      padding='same')(inputs)
        #missed batch norm
        c3_1 = actv()(c3_1)
        if splitted:
            c3_2 = Conv2D(depth // 8, (1, 5),
                          kernel_initializer='he_normal',
                          padding='same')(c3_1)
            c3_2 = BatchNormalization(axis=1)(c3_2)
            c3_2 = actv()(c3_2)
            c3_3 = Conv2D(depth // 8, (5, 1),
                          kernel_initializer='he_normal',
                          padding='same')(c3_2)
        else:
            c3_3 = Conv2D(depth // 8, (5, 5),
                          kernel_initializer='he_normal',
                          padding='same')(c3_1)

        p4_1 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1),
                            padding='same')(inputs)
        c4_2 = Conv2D(depth // 8, (1, 1),
                      kernel_initializer='he_normal',
                      padding='same')(p4_1)

        res = concatenate([c1_1, c2_3, c3_3, c4_2], axis=1)
        res = BatchNormalization(axis=1)(res)
        res = actv()(res)
        return res
Example #10
def make_conv_bn_elu(x, filters, kernel_size=1, stride=1, padding=0):
    # Integer padding flag: 0 -> 'valid', anything else -> 'same'.
    padding = 'valid' if padding == 0 else 'same'
    x = Conv2D(filters=filters, kernel_size=(kernel_size, kernel_size), strides=(stride, stride), padding=padding)(x)
    x = BatchNormalization()(x)
    x = ELU()(x)
    return x
Example #11
def Nvidia():
    # Modified NVidia model
    model = Sequential()
    # Crop the original image to remove the sky and hood.
    model.add(
        Cropping2D(cropping=((75, 25), (0, 0)), input_shape=(160, 320, 3)))
    # Normalize the input values so that they lie in (-0.5, 0.5).
    model.add(Lambda(lambda x: x / 255.0 - 0.5, name="normalization"))

    model.add(
        Conv2D(24, (5, 5),
               strides=(2, 2),
               kernel_initializer="he_normal",
               padding="valid",
               name="conv1"))
    model.add(ELU())
    model.add(
        Conv2D(36, (3, 3), kernel_initializer="he_normal", padding="valid",
               name="conv2"))
    model.add(ELU())
    model.add(
        Conv2D(48, (3, 3), kernel_initializer="he_normal", padding="valid",
               name="conv3"))
    model.add(ELU())
    model.add(
        Conv2D(64, (5, 5),
               strides=(2, 2),
               kernel_initializer="he_normal",
               padding="valid",
               name="conv4"))
    model.add(ELU())
    model.add(
        Conv2D(64, (5, 5),
               strides=(2, 2),
               kernel_initializer="he_normal",
               padding="valid",
               name="conv5"))
    model.add(ELU())
    model.add(Flatten())

    model.add(Dense(100, name="hidden1", kernel_initializer="he_normal"))
    model.add(ELU())
    model.add(Dense(50, name="hidden2", kernel_initializer="he_normal"))
    model.add(ELU())
    model.add(Dense(10, name="hidden3", kernel_initializer="he_normal"))
    model.add(ELU())

    model.add(Dense(1, name="steering_angle", activation="linear"))

    return model
Example #12
def DeepConvNet(nb_classes, Chans=64, Samples=256, dropoutRate=0.5):
    """ Keras implementation of the Dense Convolutional Network as described in
    Schirrmeister et. al. (2017), arXiv 1703.0505
    
    Assumes the input is a 2-second EEG signal sampled at 128Hz. Note that in 
    the original paper, they do temporal convolutions of length 10 for EEG 
    signals sampled at 250Hz. This explains why we're using length 5 
    convolutions for 128Hz sampled data (approximately half). We keep the 
    maxpool at (1, 3) with (1, 3) strides. 
    """

    # start the model
    input_main = Input((1, Chans, Samples))
    block1 = Conv2D(25, (1, 5))(input_main)  # shape comes from Input()
    block1 = Conv2D(25, (Chans, 1))(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = ELU()(block1)
    block1 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block1)
    block1 = Dropout(dropoutRate)(block1)

    block2 = Conv2D(50, (1, 5))(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = ELU()(block2)
    block2 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block2)
    block2 = Dropout(dropoutRate)(block2)

    block3 = Conv2D(100, (1, 5))(block2)
    block3 = BatchNormalization(axis=1)(block3)
    block3 = ELU()(block3)
    block3 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block3)
    block3 = Dropout(dropoutRate)(block3)

    block4 = Conv2D(200, (1, 5))(block3)
    block4 = BatchNormalization(axis=1)(block4)
    block4 = ELU()(block4)
    block4 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block4)
    block4 = Dropout(dropoutRate)(block4)

    flatten = Flatten()(block4)

    dense = Dense(nb_classes)(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
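A usage sketch, assuming binary-class EEG data shaped (trials, 1, 64, 256) and the 'channels_first' image data format that the (1, Chans, Samples) input implies:

model = DeepConvNet(nb_classes=2, Chans=64, Samples=256, dropoutRate=0.5)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])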
Example #13
def build_network(input_shape, output_shape):
    state = Input(shape=input_shape)
    h = Convolution2D(32,
                      3,
                      3,
                      border_mode='same',
                      subsample=(2, 2),
                      dim_ordering='th')(state)
    h = ELU(alpha=1.0)(h)
    h = Convolution2D(32,
                      3,
                      3,
                      border_mode='same',
                      subsample=(2, 2),
                      dim_ordering='th')(h)
    h = ELU(alpha=1.0)(h)
    h = Convolution2D(32,
                      3,
                      3,
                      border_mode='same',
                      subsample=(2, 2),
                      dim_ordering='th')(h)
    h = ELU(alpha=1.0)(h)
    h = Convolution2D(32,
                      3,
                      3,
                      border_mode='same',
                      subsample=(2, 2),
                      dim_ordering='th')(h)
    h = ELU(alpha=1.0)(h)
    h = Flatten()(h)

    value = Dense(256, activation='relu')(h)
    value = Dense(1, activation='linear', name='value')(value)
    #policy = LSTM(output_shape, activation='sigmoid', name='policy')(h)
    policy = Dense(output_shape, activation='sigmoid', name='policy')(h)

    value_network = Model(input=state, output=value)
    policy_network = Model(input=state, output=policy)

    # 'advantage' is an extra input consumed by a custom policy-gradient
    # loss at compile time; it does not feed either output directly.
    advantage = Input(shape=(1, ))
    train_network = Model(input=[state, advantage], output=[value, policy])

    return value_network, policy_network, train_network, advantage
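A hedged sketch of the custom policy loss that would consume the advantage input at compile time; the entropy weight and epsilon are assumptions, since the source does not show the compile step:

from keras import backend as K

def make_policy_loss(advantage, beta=0.01):
    # REINFORCE-style loss: log-likelihood of the taken action weighted
    # by the advantage, plus an entropy bonus for exploration.
    def loss(y_true, y_pred):
        log_prob = K.sum(y_true * K.log(y_pred + 1e-10), axis=-1)
        entropy = -K.sum(y_pred * K.log(y_pred + 1e-10), axis=-1)
        return -K.mean(log_prob * K.flatten(advantage) + beta * entropy)
    return loss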
Example #14
def build_model(X, Y, nb_classes):
    nb_filters = 32  # number of convolutional filters to use
    pool_size = (2, 2)  # size of pooling area for max pooling
    kernel_size = (3, 12)  # convolution kernel size
    input_shape = (1, X.shape[2], X.shape[3])

    model = Sequential()

    model.add(BatchNormalization(axis=1, input_shape=input_shape))
    #layer 1
    model.add(Conv2D(64, (200, 1)))
    model.add(BatchNormalization(axis=1))
    model.add(ELU())

    print(model.output_shape)

    #layer 2
    model.add(Conv2D(32, (1, 4), strides=(1, 2)))
    model.add(BatchNormalization(axis=1))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(1, 2)))

    print(model.output_shape)

    #layer 3
    model.add(Conv2D(nb_filters, (1, 4), strides=(1, 2)))
    model.add(BatchNormalization(axis=1))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(1, 2)))
    #layer 4
    model.add(Conv2D(nb_filters, (1, 4), strides=(1, 2)))
    model.add(BatchNormalization(axis=1))
    model.add(ELU())
    #layer 5
    model.add(Conv2D(nb_filters, (1, 4), strides=(1, 2)))
    model.add(BatchNormalization(axis=1))
    model.add(ELU())

    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Dropout(0.6))
    model.add(Activation("softmax"))

    return model
Example #15
def pooling(depth, x):
    x = Conv2D(depth, (1, 1), padding='same',
               kernel_initializer="he_uniform",
               kernel_regularizer=regularizers.l2(0.0001),
               data_format='channels_last')(x)
    x = ELU()(x)  # apply the ELU layer directly rather than via Activation()
    x = BatchNormalization()(x)

    out = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(x)
    return out
Example #16
 def _get_act_by_name(self, act):
     # TODO: parametric activation functions
     str_act = ['relu', 'tanh', 'sigmoid', 'linear','softmax','softplus','softsign','hard_sigmoid']
     if (act == 'selu'):
         return Activation(selu)
     if (act in str_act):
         return Activation(act)
     else:
         return {'prelu': PReLU(), 'elu' : ELU(), 'lrelu' : LeakyReLU(),
                'trelu':ThresholdedReLU()}[act]
Example #17
def init_model():
    '''
    Define the model and return it for training.
    Convnet for behavioral cloning, inspired by NVIDIA and comma.ai.
    Deprecated: no longer called in main(); kept here to view the
    architecture quickly.
    '''
    model = Sequential()
    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(66, 200, 3)))
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      subsample=(2, 2),
                      border_mode='valid',
                      W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      subsample=(2, 2),
                      border_mode='valid',
                      W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      subsample=(2, 2),
                      border_mode='valid',
                      W_regularizer=l2(0.001)))
    model.add(ELU())
    #model.add(Dropout(0.50))
    model.add(
        Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(
        Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(Flatten())
    model.add(Dense(128, W_regularizer=l2(0.001)))
    model.add(ELU())
    #model.add(Dropout(0.50))
    model.add(Dense(32, W_regularizer=l2(0.001)))
    model.add(ELU())
    #model.add(Dropout(0.50))
    model.add(Dense(8, W_regularizer=l2(0.001)))
    model.add(ELU())
    #model.add(Dropout(0.50))
    model.add(Dense(1))
    return model
Example #18
def get_model_nvidia():
    model = Sequential()

    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(66, 200, 3)))

    model.add(
        Convolution2D(24,
                      5,
                      5,
                      subsample=(2, 2),
                      border_mode='valid',
                      W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      subsample=(2, 2),
                      border_mode='valid',
                      W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      subsample=(2, 2),
                      border_mode='valid',
                      W_regularizer=l2(0.001)))
    model.add(ELU())

    model.add(
        Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(
        Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l2(0.001)))
    model.add(ELU())

    model.add(Flatten())

    model.add(Dense(100, W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(Dense(50, W_regularizer=l2(0.001)))
    model.add(ELU())
    model.add(Dense(10, W_regularizer=l2(0.001)))
    model.add(ELU())

    # Add a fully connected output layer
    model.add(Dense(1))

    model.compile(optimizer=Adam(lr=1e-4), loss='mse')

    model.summary()
    return model
Example #19
def get_activation(act, string=False):
    str_act = ['relu', 'tanh', 'sigmoid', 'linear','softmax','softplus','softsign','hard_sigmoid']
    if (act in str_act):
        if string:
            return act
        else:
            return Activation(act)
    else:
        return {'prelu': PReLU(), 'elu' : ELU(), 'lrelu' : LeakyReLU(),
               }[act]
Example #20
def comma_ai_model(summary=True):
    ch, row, col = 3, 160, 320
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(row, col, ch)))
    model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
    model.add(ELU())
    model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
    model.add(ELU())
    model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
    model.add(Flatten())
    model.add(Dropout(.2))
    model.add(ELU())
    model.add(Dense(512))
    model.add(Dropout(.5))
    model.add(ELU())
    model.add(Dense(1))
    if summary:
        model.summary()
    return model
Example #21
def create_model():
    '''
    NULL
    '''
    _input = Input((256, 256, 4))

    x = make_conv_bn_elu(_input, 32, 3, 2)
    x = make_conv_bn_elu(x, 32, 3, 1, 1)
    x = make_conv_bn_elu(x, 64, 3, 1, 1)

    x1 = MaxPool2D(pool_size=(3, 3), strides=2)(x)
    x2 = make_conv_bn_elu(x, 96, 3, 2)
    x = Concatenate()([x1, x2])

    x = make_conv_bn_elu(x, 16)
    x = make_conv_bn_elu(x, 16)
    x = make_conv_bn_elu(x, 16)

    x = make_block(x, 32)

    x = make_block(x, 32)

    outa = make_block(x, 64)

    outb = make_block(outa, 128)

    # add dropout to a and b here if overfitting
    outa = GlobalAveragePooling2D()(outa)
    outb = GlobalAveragePooling2D()(outb)

    out = Concatenate()([outa, outb])

    out = Dense(512)(out)
    out = BatchNormalization()(out)
    out = ELU()(out)
    out = Dense(512)(out)
    out = BatchNormalization()(out)
    out = ELU()(out)

    out = Dense(17)(out)
    out = Activation('sigmoid')(out)

    return Model(_input, out)
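make_block is not defined in this excerpt. Given the surrounding make_conv_bn_elu helper, a plausible (purely hypothetical) reconstruction is a downsampling group of Conv-BN-ELU units:

def make_block(x, filters):
    # Hypothetical: the real make_block lives elsewhere in the source file.
    x = make_conv_bn_elu(x, filters, kernel_size=3, stride=2, padding=1)
    x = make_conv_bn_elu(x, filters, kernel_size=3, stride=1, padding=1)
    return x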
Example #22
    def set_classifier_vgg16(self):
        extracted_features = Input(shape=(self.num_features, ),
                                   dtype='float32',
                                   name='input')
        if self.batch_norm:
            x = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(extracted_features)
            x = Activation('relu')(x)
        else:
            x = ELU(alpha=1.0)(extracted_features)

        x = Dropout(0.9)(x)
        x = Dense(self.num_features,
                  name='fc2',
                  kernel_initializer='glorot_uniform')(x)
        if self.batch_norm:
            x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
            x = Activation('relu')(x)
        else:
            x = ELU(alpha=1.0)(x)
        x = Dropout(0.8)(x)
        x = Dense(self.num_classes,
                  name='predictions',
                  kernel_initializer='glorot_uniform')(x)
        x = Activation('softmax')(x)

        adam = Adam(lr=self.learning_rate,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-08,
                    decay=0.0005)

        classifier = Model(name="classifier",
                           inputs=extracted_features,
                           outputs=x)
        classifier.compile(optimizer=adam,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        print('THIS IS THE MODEL')
        print(classifier.summary())
        return classifier
Example #23
    def build(width, height, depth, classes):
        # initialize the model along with the input shape to be
        # "channels last" and the channels dimension itself
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1

        # if we are using "channels first", update the input shape
        # and channels dimension
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Block #1: first CONV => ELU => CONV => ELU => POOL layer set
        model.add(Conv2D(32, (3, 3), padding="same", kernel_initializer="he_normal",
                         input_shape=inputShape))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(32, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Block #2: second CONV => ELU => CONV => ELU => POOL layer set
        model.add(Conv2D(64, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Block #3: third CONV => ELU => CONV => ELU => POOL layer set
        model.add(Conv2D(128, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(128, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Block #6: FC => ELU layer set
        model.add(Flatten())
        model.add(Dense(64, kernel_initializer="he_normal"))
        model.add(ELU())
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Block #7: softmax classifier
        model.add(Dense(classes, kernel_initializer="he_normal"))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        return model
Example #24
 def define_model(self):
     model = Sequential()
     # Input size is the number of dims in state plus action variable
     model.add(Dense(400, input_dim=state_plus_action_dim))
     model.add(ELU())
     model.add(Dropout(.3))
     model.add(Dense(100))
     model.add(ELU())
     model.add(Dropout(.3))
     model.add(Dense(100))
     model.add(ELU())
     model.add(Dropout(.3))
     model.add(Dense(32))
     model.add(ELU())
     model.add(Dropout(.2))
     model.add(Dense(1))
     model.compile(loss='mse', optimizer="adam")
     print "Model has been constructed"
     print model.summary()
     return model
Example #25
def get_activation_layer(activation):
    if activation == 'LeakyReLU':
        return LeakyReLU()
    if activation == 'PReLU':
        return PReLU()
    if activation == 'ELU':
        return ELU()
    if activation == 'ThresholdedReLU':
        return ThresholdedReLU()

    return Activation(activation)
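Usage is uniform whether the name maps to an advanced-activation layer or falls through to a plain Activation:

elu_layer = get_activation_layer('ELU')    # ELU() instance
tanh_layer = get_activation_layer('tanh')  # Activation('tanh')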
Example #26
def rblock(inputs, num, depth, scale=0.1):
    # residual = Convolution2D(depth, num, num, border_mode='same')(inputs)
    residual = Conv2D(depth, (num, num),
                      padding="same",
                      data_format='channels_first')(inputs)

    # residual = BatchNormalization(mode=2, axis=1)(residual)
    residual = BatchNormalization(axis=1)(residual)
    residual = Lambda(lambda x: x * scale)(residual)
    res = _shortcut(inputs, residual)
    return ELU()(res)
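_shortcut is assumed from context to be the usual ResNet helper: add the residual to the input, projecting the input with a 1x1 convolution when channel counts differ. A minimal sketch under that assumption (channels-first, matching the data_format above):

from keras import backend as K
from keras.layers import Add, Conv2D

def _shortcut(inputs, residual):
    # Hypothetical reconstruction; the real helper is defined elsewhere.
    in_ch = K.int_shape(inputs)[1]
    res_ch = K.int_shape(residual)[1]
    if in_ch != res_ch:
        inputs = Conv2D(res_ch, (1, 1), padding='same',
                        data_format='channels_first')(inputs)
    return Add()([inputs, residual])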
Example #27
def DenseLayer(model, filter_size, dropout, activation, normalization):
    model.add(Dense(filter_size))
    if (dropout > 0.0): model.add(Dropout(dropout))

    # add activation layer
    if activation == 'LeakyReLU': model.add(LeakyReLU(alpha=0.001))
    elif activation == 'ELU': model.add(ELU())
    else: model.add(Activation(activation))

    # add normalization after activation
    if normalization: model.add(BatchNormalization())
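These helpers mutate a Sequential model in place rather than returning layers; a short usage sketch (shapes and sizes are illustrative):

from keras.models import Sequential
from keras.layers import Flatten

model = Sequential()
model.add(Flatten(input_shape=(8, 8, 32)))
DenseLayer(model, filter_size=128, dropout=0.5, activation='ELU', normalization=True)
DenseLayer(model, filter_size=10, dropout=0.0, activation='softmax', normalization=False)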
Example #28
 def _bn_relu_conv(nb_filter, nb_row=3, nb_col=3, subsample=1):
     # Despite its name, this block applies ELU (legacy Keras 1.x API).
     return sequential([
         BatchNormalization(mode=0, axis=1),
         ELU(),
         Convolution2D(nb_filter=nb_filter,
                       nb_row=nb_row,
                       nb_col=nb_col,
                       subsample=(subsample, subsample),
                       init="he_normal",
                       border_mode="same")
     ])
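sequential here is not keras.models.Sequential but a small composition helper assumed from context; a sketch of what it must do:

def sequential(layers):
    # Compose layers into one callable:
    # sequential([a, b, c])(x) == c(b(a(x))).
    def f(x):
        for layer in layers:
            x = layer(x)
        return x
    return f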
Example #29
def ConvolutionalLayer(model, filter_size, kernel_size, padding, activation, normalization, input_shape=None):
    if input_shape is not None: model.add(Convolution3D(filter_size, kernel_size, padding=padding, input_shape=input_shape, dilation_rate=1, strides=1))
    else: model.add(Convolution3D(filter_size, kernel_size, padding=padding, dilation_rate=1, strides=1))

    # add activation layer
    if activation == 'LeakyReLU': model.add(LeakyReLU(alpha=0.001))
    elif activation == 'ELU': model.add(ELU())
    else: model.add(Activation(activation))
    
    # add normalization after activation
    if normalization: model.add(BatchNormalization())
Example #30
 def minires(inputs, n_filters, kernel=1):
     # l2_reg is captured from the enclosing scope.
     x = Conv2D(int(n_filters), (kernel, kernel),
                padding='valid',
                kernel_initializer='he_normal',
                kernel_regularizer=l2(l2_reg))(inputs)
     x = ELU(alpha=1.0)(x)
     x = Conv2D(n_filters, (kernel, kernel),
                padding='valid',
                kernel_initializer='he_normal',
                kernel_regularizer=l2(l2_reg))(x)
     return x