def SynapticNeuronUnit(dendrites, filter_size, kernel_size, CRP, d_rate,
                       use_STR):
    """Build one synaptic neuron unit on top of ``dendrites``.

    CRP is a 3-tuple (conv kind, resize op, padding mode); d_rate is a pair
    of (Gaussian, Spatial) dropout rates — TODO confirm with callers.
    """
    # Optional bilinear upsampling happens before everything else.
    if CRP[1] == 'UpSampling':
        dendrites = UpSampling2D(interpolation='bilinear')(dendrites)

    # Synaptic Transmission Regulator, STR, calculates weight and bias for
    # each channel of the input tensor.
    neuro_potential = (SynapticTransmissionRegulator()(dendrites)
                       if use_STR else dendrites)

    # Main neural potential: choose the convolution flavour from CRP[0].
    conv_kind = CRP[0]
    if conv_kind == 'Normal':
        neuro_potential = Conv2D(filters=filter_size,
                                 kernel_size=kernel_size,
                                 padding=CRP[2],
                                 kernel_initializer='he_uniform',
                                 use_bias=False)(neuro_potential)
    elif conv_kind == 'Transpose':
        neuro_potential = Conv2DTranspose(filters=filter_size,
                                          kernel_size=kernel_size,
                                          padding=CRP[2],
                                          kernel_initializer='he_uniform',
                                          use_bias=False)(neuro_potential)
    elif conv_kind == 'Separable':
        neuro_potential = SeparableConv2D(filters=filter_size,
                                          kernel_size=kernel_size,
                                          padding=CRP[2],
                                          depthwise_initializer='he_uniform',
                                          pointwise_initializer='he_uniform',
                                          use_bias=False)(neuro_potential)
    elif conv_kind == 'Atrous':
        # Strided conv followed by asymmetric zero padding.
        neuro_potential = Conv2D(filters=filter_size,
                                 kernel_size=kernel_size,
                                 strides=2,
                                 padding=CRP[2],
                                 kernel_initializer='he_uniform',
                                 use_bias=False)(neuro_potential)
        neuro_potential = ZeroPadding2D(
            padding=((1, 0), (1, 0)))(neuro_potential)
    else:
        neuro_potential = None  # Will be error

    neuro_potential = BatchNormalization(momentum=0.95)(neuro_potential)
    neuro_potential = ParametricSwish()(neuro_potential)

    # Output potential to axons: optional pooling, then optional dropouts.
    if CRP[1] == 'MaxPooling':
        neuro_potential = MaxPooling2D()(neuro_potential)
    if d_rate[0] > 0.0:
        neuro_potential = GaussianDropout(rate=d_rate[0])(neuro_potential)
    if d_rate[1] > 0.0:
        neuro_potential = SpatialDropout2D(rate=d_rate[1])(neuro_potential)

    return neuro_potential
Esempio n. 2
0
 def fun(input_):
     """Stack (nlayers - 1) conv+dropout pairs, a final conv, then pool."""
     x = input_
     layer_idx = 0
     while layer_idx < nlayers - 1:
         x = convbnrelu(nfilters)(x)
         x = SpatialDropout2D(dropout)(x)
         layer_idx += 1
     x = convbnrelu(nfilters)(x)
     return MaxPooling2D(pool_size=(2, 2))(x)
Esempio n. 3
0
    def f(input):
        """Pre-activation residual block (legacy Keras 1 API).

        Applies activation -> conv -> (dropout) -> activation -> conv and
        sums the result with a shortcut.  When ``subsample`` is set, the
        first conv is strided and ``cpool`` is used as the shortcut.
        """
        # if not the first block we subsample conv to downsample
        nonl1 = activation(input)
        if subsample:
            # Strided conv downsamples; it also serves as the shortcut.
            cpool = Convolution2D(kernels,
                                  3,
                                  3,
                                  subsample=(2, 2),
                                  init='he_normal',
                                  border_mode='same')(nonl1)
            conv1 = Convolution2D(kernels,
                                  3,
                                  3,
                                  init='he_normal',
                                  border_mode='same')(cpool)
        else:
            conv1 = Convolution2D(kernels,
                                  3,
                                  3,
                                  init='he_normal',
                                  border_mode='same')(nonl1)
        if spatial_dropout > 0:
            conv1 = SpatialDropout2D(spatial_dropout)(conv1)

        nonl2 = activation(conv1)
        conv2 = Convolution2D(kernels,
                              3,
                              3,
                              init='he_normal',
                              border_mode='same')(nonl2)
        if spatial_dropout > 0:
            conv2 = SpatialDropout2D(spatial_dropout)(conv2)

        input1_shape = K.int_shape(input)
        input2_shape = K.int_shape(conv2)
        same_shape = input1_shape == input2_shape
        if subsample:
            ewsum = merge([cpool, conv2], mode='sum')
        else:
            if same_shape:
                ewsum = merge([input, conv2], mode='sum')
            else:
                # BUG FIX: the 1x1 projection must adapt the *input* (the
                # shortcut) to conv2's shape; the original projected conv2
                # and summed it with itself, dropping the residual skip.
                match = Convolution2D(kernels, 1, 1, init='he_normal')(input)
                ewsum = merge([match, conv2], mode='sum')

        return ewsum
def get_model(learning_rate):
    """Build and compile the steering-angle regression model (MSE loss)."""
    ch, row, col = 3, 160, 320  # camera format

    model = Sequential()

    # Crop sky/hood, then resize and normalise inside the graph.
    model.add(Cropping2D(cropping=((30, 0), (25, 0)),
                         input_shape=(row, col, ch)))
    model.add(Lambda(resize_blog))
    model.add(Lambda(normalize_greyscale))

    # 1x1 conv lets the network learn its own colour transform.
    model.add(Convolution2D(3, 1, 1))

    # Three conv stages of increasing depth, each pooled and regularised.
    for depth in (16, 32, 64):
        model.add(Convolution2D(depth, 3, 3))
        model.add(Convolution2D(depth, 3, 3))
        model.add(MaxPooling2D((2, 2)))
        model.add(ELU())
        model.add(SpatialDropout2D(.5))

    # Regression head.
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(ELU())
    model.add(Dense(64))
    model.add(ELU())
    model.add(Dense(16))
    model.add(Dense(1))

    model.compile(optimizer=Adam(lr=learning_rate),
                  loss="mse",
                  metrics=['mse'])
    print(model.summary())

    return model
Esempio n. 5
0
 def bulid(self, inputlayer):
     """Dilated conv -> BatchNorm -> SpatialDropout -> tanh activation.

     (Name kept as-is — renaming would break existing callers.)
     """
     conv = Conv2D(self.filters,
                   (self.num_row, self.num_col),
                   dilation_rate=self.dilation_rate,
                   strides=self.strides,
                   padding=self.Padding,
                   use_bias=False,
                   name=self.conv_name,
                   kernel_regularizer=self.Kernel_regularizer)(inputlayer)
     normed = BatchNormalization(axis=1, scale=False,
                                 name=self.bn_name)(conv)
     normed = SpatialDropout2D(0.4)(normed)
     return Activation('tanh', name=self.name)(normed)
Esempio n. 6
0
def to_multiscale_nin(backbone, input_shape, branch_names):
    '''
    Adapts VGG16 from a keras_applications fork into a multiscale VGG16.
    See paper: https://arxiv.org/pdf/1803.11395.pdf
    '''
    # Collect the multiscale branch roots by layer name.
    net_outputs = [layer.output for layer in backbone.layers
                   if layer.name in branch_names]

    # Run each branch through a small NiN head; strides align resolutions.
    branch_outputs = []
    for branch, stride in zip(net_outputs, [4, 2, 1, 1]):
        head = bb.conv_bn_relu(branch,
                               ch=128,
                               ksize=3,
                               stride=stride,
                               activation='relu')
        head = bb.conv_bn_relu(head, ch=128, ksize=1, stride=1,
                               activation='relu')
        head = bb.conv_bn_relu(head, ch=3, ksize=1, stride=1,
                               activation='relu')
        branch_outputs.append(head)

    # Replace dense layers with two 4x-dilated 1x1 conv stages.
    x = backbone.output
    for _ in range(2):
        x = Conv2D(512, (1, 1),
                   dilation_rate=(4, 4),
                   padding='same',
                   activation=None)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = SpatialDropout2D(0.1)(x)
    y = Conv2D(3, (1, 1), name='output', activation='relu', padding='same')(x)

    # Merge the dilated head with every branch output.
    x = Concatenate(axis=-1)([y, *branch_outputs])

    multiscale_model = Model(backbone.inputs, x, name='MultiscaleVgg16')
    multiscale_model.summary()
    return multiscale_model
Esempio n. 7
0
def deconv_block(neurons, block_input, shortcut, bn=False, dropout=None):
    """Transposed-conv upsample, concat with the skip, then two conv stages."""
    def _conv_stage(tensor):
        # conv -> optional BN -> relu -> optional spatial dropout
        tensor = Conv2D(neurons, (3, 3), padding="same",
                        kernel_initializer='he_normal')(tensor)
        if bn:
            tensor = BatchNormalization()(tensor)
        tensor = Activation('relu')(tensor)
        if dropout is not None:
            tensor = SpatialDropout2D(dropout)(tensor)
        return tensor

    upsampled = Conv2DTranspose(neurons, (3, 3), strides=(2, 2),
                                padding="same")(block_input)
    merged = concatenate([upsampled, shortcut])
    return _conv_stage(_conv_stage(merged))
Esempio n. 8
0
    def decoder_a(self):
        """ Decoder for side A """
        kwargs = dict(kernel_size=5,
                      kernel_initializer=self.kernel_initializer)
        if self.low_mem:
            decoder_complexity = 320
        else:
            decoder_complexity = self.config["complexity_decoder_a"]
        dense_dim = 384 if self.low_mem else 512
        decoder_shape = self.input_shape[0] // 16
        input_ = Input(shape=(decoder_shape, decoder_shape, dense_dim))

        # Face branch: four upscales, dropout after the first two.
        var_x = UpscaleBlock(decoder_complexity,
                             activation="leakyrelu",
                             **kwargs)(input_)
        var_x = SpatialDropout2D(0.25)(var_x)
        var_x = UpscaleBlock(decoder_complexity,
                             activation="leakyrelu",
                             **kwargs)(var_x)
        var_x = SpatialDropout2D(0.15 if self.low_mem else 0.25)(var_x)
        var_x = UpscaleBlock(decoder_complexity // 2,
                             activation="leakyrelu",
                             **kwargs)(var_x)
        var_x = UpscaleBlock(decoder_complexity // 4,
                             activation="leakyrelu",
                             **kwargs)(var_x)
        var_x = Conv2DOutput(3, 5, name="face_out_a")(var_x)
        outputs = [var_x]

        # Optional mask branch (no kernel kwargs, matching the original).
        if self.config.get("learn_mask", False):
            var_y = input_
            for complexity in (decoder_complexity,
                               decoder_complexity,
                               decoder_complexity // 2,
                               decoder_complexity // 4):
                var_y = UpscaleBlock(complexity,
                                     activation="leakyrelu")(var_y)
            var_y = Conv2DOutput(1, 5, name="mask_out_a")(var_y)
            outputs.append(var_y)
        return KerasModel(input_, outputs=outputs, name="decoder_a")
def nvidia_model(input_shape=(160, 320, 3), drop_out=0.5, drop_out_sp=0.2):
    """
    NVIDIA Architecture
    src:[http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf]
    """
    print('\n\n ')
    print('>> Building the model (NVIDIA Architecture)...')

    # Pre-processing layer
    model = pre_processing_model(input_shape=input_shape)

    # Strided 5x5 conv stages, each followed by spatial dropout.
    for depth in (24, 36, 48):
        model.add(
            Convolution2D(depth, (5, 5),
                          strides=(2, 2),
                          padding='valid',
                          activation='relu'))
        model.add(SpatialDropout2D(drop_out_sp))

    # Two unstrided 3x3 conv stages (dropout only between them).
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(SpatialDropout2D(drop_out_sp))
    model.add(Convolution2D(64, (3, 3), activation='relu'))

    # Regression head with plain dropout before every dense layer.
    model.add(Flatten())
    for units in (100, 50, 10, 1):
        model.add(Dropout(drop_out))
        model.add(Dense(units))

    model.summary()
    return model
Esempio n. 10
0
def CapLayer(layer_in, act_f=None, classes=1):
    """Classification cap: spatial dropout then a 1x1 conv to ``classes``."""
    dropped = SpatialDropout2D(settings.options.dropout)(layer_in)
    return Conv2D(filters=classes,
                  kernel_size=(1, 1),
                  padding='same',
                  activation=act_f,
                  use_bias=True)(dropped)
Esempio n. 11
0
    def RDDNeck(self,
                x,
                out_channels,
                down_flag,
                dilation=1,
                keep_probs=0.1,
                projection_ratio=4):
        """ENet-style bottleneck: 1x1 reduce -> 3x3 (dilated/strided) ->
        1x1 expand on a side branch, summed with a (possibly pooled and
        channel-padded) main branch, then PReLU.

        NOTE(review): despite its name, ``keep_probs`` is passed straight
        to SpatialDropout2D, so it acts as a *drop* rate — confirm intent.
        """

        inp = x

        # Downsampling blocks stride the 3x3 conv and size the bottleneck
        # from the input depth; others size it from out_channels.
        if down_flag:
            stride = 2
            reduced_depth = int(int(inp.shape[-1]) // projection_ratio)
        else:
            stride = 1
            reduced_depth = int(out_channels // projection_ratio)

        # side branch
        x = Conv2D(filters=reduced_depth,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   use_bias=False,
                   dilation_rate=1)(inp)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)

        # Pad by the dilation so the dilated 3x3 conv preserves spatial
        # size when stride == 1.
        x = ZeroPadding2D(padding=(dilation, dilation))(x)
        x = Conv2D(filters=reduced_depth,
                   kernel_size=(3, 3),
                   strides=stride,
                   use_bias=True,
                   dilation_rate=dilation)(x)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)

        # Expand back to out_channels (linear; BN but no activation here).
        x = Conv2D(filters=out_channels,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   use_bias=False,
                   dilation_rate=1)(x)
        x = BatchNormalization()(x)

        x = SpatialDropout2D(keep_probs)(x)

        # main branch
        if down_flag:
            inp = MaxPooling2D(pool_size=(2, 2), strides=2)(inp)
        if not inp.shape[-1] == out_channels:
            # Zero-pad the channel axis up to out_channels: Permute swaps
            # the last two axes so ZeroPadding2D can pad what is normally
            # the channel dimension, then swaps them back.
            out_shape = out_channels - inp.shape[-1]
            inp = Permute((1, 3, 2))(inp)
            inp = ZeroPadding2D(padding=((0, 0), (0, out_shape)))(inp)
            inp = Permute((1, 3, 2))(inp)

        x = Add()([x, inp])
        x = PReLU(shared_axes=[1, 2])(x)

        return x
Esempio n. 12
0
def create_model(spatial_dropout_rate_1=0,
                 spatial_dropout_rate_2=0,
                 l2_rate=0):
    """Four-conv classifier with global-average-pooling softmax head.

    Relies on module-level num_rows / num_columns / num_channels /
    num_labels — presumably set by the surrounding script; verify.
    """
    # Create a sequential object
    model = Sequential()

    # Conv 1 carries the input shape.
    model.add(
        Conv2D(filters=32,
               kernel_size=(3, 3),
               kernel_regularizer=l2(l2_rate),
               input_shape=(num_rows, num_columns, num_channels)))
    model.add(LeakyReLU(alpha=0.1))
    model.add(BatchNormalization())

    # Remaining stages: dropout-before, conv, LeakyReLU, BN, optional pool.
    stages = ((spatial_dropout_rate_1, 32, True),
              (spatial_dropout_rate_1, 64, False),
              (spatial_dropout_rate_2, 64, False))
    for drop_rate, n_filters, pool_after in stages:
        model.add(SpatialDropout2D(drop_rate))
        model.add(
            Conv2D(filters=n_filters,
                   kernel_size=(3, 3),
                   kernel_regularizer=l2(l2_rate)))
        model.add(LeakyReLU(alpha=0.1))
        model.add(BatchNormalization())
        if pool_after:
            model.add(MaxPooling2D(pool_size=(2, 2)))

    # Reduces each h×w feature map to a single number (its average).
    model.add(GlobalAveragePooling2D())

    # Softmax output
    model.add(Dense(num_labels, activation='softmax'))

    return model
Esempio n. 13
0
def qc_model():
    """Binary QC classifier over (1, 256, 224) inputs, SGD + crossentropy."""
    nb_classes = 2

    model = Sequential()

    # First stage is the only one with BatchNorm (between pool and dropout).
    model.add(Conv2D(16, (3, 3), padding='same', input_shape=(1, 256, 224)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(SpatialDropout2D(0.2))

    # Five pooled conv stages with growing width and dropout.
    for n_filters, drop_rate in ((32, 0.3), (32, 0.3), (64, 0.3), (64, 0.3),
                                 (128, 0.4)):
        model.add(Conv2D(n_filters, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(SpatialDropout2D(drop_rate))

    # Final conv stage, unpooled.
    model.add(Conv2D(256, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(SpatialDropout2D(0.5))

    # Two dense+dropout layers, then the softmax output.
    model.add(Flatten())
    for _ in range(2):
        model.add(Dense(256, kernel_initializer='uniform'))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))

    model.add(Dense(nb_classes, kernel_initializer='uniform'))
    model.add(Activation('softmax'))

    sgd = SGD(lr=1e-3, momentum=0.9, decay=1e-6, nesterov=True)

    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=["accuracy"])

    return model
Esempio n. 14
0
 def m(dropout):
     """Assemble the classifier from the shared conv layers c1..c4."""
     model = Sequential()
     # Three pooled stages built from the pre-made conv layers.
     for conv_layer in (c1, c2, c3):
         model.add(conv_layer)
         model.add(SpatialDropout2D(dropout))
         model.add(MaxPooling2D(pool_size=(2, 2)))
         model.add(LeakyReLU(0.2))
     # Final conv, average-pool, then a sigmoid over the flattened output.
     model.add(c4)
     model.add(AveragePooling2D(pool_size=(4, 4), padding="valid"))
     model.add(Flatten())
     model.add(Activation("sigmoid"))
     return model
Esempio n. 15
0
def create_model():
    """Create and return the NVIDIA model architecture."""
    model = Sequential()
    # In-graph normalisation to [-0.5, 0.5].
    model.add(
        Lambda(lambda x: (x / 255.0) - 0.5, input_shape=params.input_shape))

    # Three strided 5x5 conv stages, each followed by spatial dropout.
    for depth in (24, 36, 48):
        model.add(
            Convolution2D(depth,
                          5,
                          5,
                          subsample=(2, 2),
                          activation="elu",
                          init='he_normal'))
        model.add(SpatialDropout2D(params.dropout))

    # Two unstrided 3x3 conv stages with dropout.
    for _ in range(2):
        model.add(Convolution2D(64, 3, 3, activation="elu",
                                init='he_normal'))
        model.add(SpatialDropout2D(params.dropout))

    # Regression head.
    model.add(Flatten())
    for units in (100, 50, 10, 1):
        model.add(Dense(units, activation="elu", init='he_normal'))

    model.compile(loss='mean_squared_error', optimizer='Adam')
    model.summary()
    return model
Esempio n. 16
0
def build_network():
    """10-class conv classifier over 28x28x1 inputs (conv-BN-LeakyReLU)."""
    net = Sequential()

    def _conv_bn_lrelu(n_filters, ksize, **conv_kwargs):
        # Shared conv -> BatchNorm -> LeakyReLU stage.
        net.add(Conv2D(n_filters, kernel_size=ksize, padding='same',
                       **conv_kwargs))
        net.add(BatchNormalization(momentum=0.8))
        net.add(LeakyReLU(alpha=0.2))

    # Three 5x5 stages, pool + dropout.
    _conv_bn_lrelu(16, 5, input_shape=(28, 28, 1))
    _conv_bn_lrelu(32, 5)
    _conv_bn_lrelu(64, 5)
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(SpatialDropout2D(0.3))

    # Three 3x3 stages, pool + dropout.
    _conv_bn_lrelu(64, 3)
    _conv_bn_lrelu(128, 3)
    _conv_bn_lrelu(256, 3)
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(SpatialDropout2D(0.3))

    # Dense head.
    net.add(Flatten())
    net.add(Dense(512, activation=None))
    net.add(BatchNormalization(momentum=0.8))
    net.add(ReLU())
    net.add(Dropout(0.1))
    net.add(Dense(10, activation='softmax'))

    return net
Esempio n. 17
0
    def decoder_a(self):
        """ Decoder for side A """
        kwargs = dict(kernel_size=5,
                      kernel_initializer=self.kernel_initializer)
        if self.lowmem:
            decoder_complexity = 320
        else:
            decoder_complexity = self.config["complexity_decoder_a"]
        dense_dim = 384 if self.lowmem else 512
        decoder_shape = self.input_shape[0] // 16
        input_ = Input(shape=(decoder_shape, decoder_shape, dense_dim))

        # Face branch: four upscales, dropout after the first two.
        var_x = self.blocks.upscale(input_, decoder_complexity, **kwargs)
        var_x = SpatialDropout2D(0.25)(var_x)
        var_x = self.blocks.upscale(var_x, decoder_complexity, **kwargs)
        var_x = SpatialDropout2D(0.15 if self.lowmem else 0.25)(var_x)
        var_x = self.blocks.upscale(var_x, decoder_complexity // 2, **kwargs)
        var_x = self.blocks.upscale(var_x, decoder_complexity // 4, **kwargs)
        var_x = self.blocks.conv2d(var_x,
                                   3,
                                   kernel_size=5,
                                   padding="same",
                                   activation="sigmoid",
                                   name="face_out")
        outputs = [var_x]

        # Optional mask branch (no kernel kwargs, matching the original).
        if self.config.get("mask_type", None):
            var_y = input_
            for complexity in (decoder_complexity,
                               decoder_complexity,
                               decoder_complexity // 2,
                               decoder_complexity // 4):
                var_y = self.blocks.upscale(var_y, complexity)
            var_y = self.blocks.conv2d(var_y,
                                       1,
                                       kernel_size=5,
                                       padding="same",
                                       activation="sigmoid",
                                       name="mask_out")
            outputs.append(var_y)
        return KerasModel(input_, outputs=outputs)
Esempio n. 18
0
def addConvBNSequential(model, filters=32):
    """Optional BN and dropout (per ``options``) before a 3x3 same conv."""
    x = model
    if options.batchnorm:
        x = BatchNormalization()(x)
    if options.dropout > 0.0:
        x = SpatialDropout2D(options.dropout)(x)
    return Conv2D(filters=filters,
                  kernel_size=(3, 3),
                  padding='same',
                  activation=options.activation)(x)
Esempio n. 19
0
def NvidiaNet(model):
    """Append the NVIDIA-style conv/dense stack to an existing model."""
    # Three strided 5x5 conv stages, each followed by spatial dropout.
    for depth in (24, 36, 48):
        model.add(Convolution2D(depth, 5, 5, subsample=(2, 2),
                                activation="relu"))
        model.add(SpatialDropout2D(0.2))
    # Two unstrided 3x3 conv stages.
    for _ in range(2):
        model.add(Convolution2D(64, 3, 3, activation="relu"))
        model.add(SpatialDropout2D(0.2))
    # Dense regression head with dropout at both ends.
    model.add(Flatten())
    model.add(Dropout(0.6))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.6))
    model.add(Dense(1))
    return model
Esempio n. 20
0
def Conv_block(num_filters,
               filter_size=(3, 3),
               max_pooling=None,
               padding='same',
               dropout=0.0,
               BN=False,
               name='',
               **kwargs):
    """
    2D Convolutional block followed by optional BatchNormalization, Activation (not optional), MaxPooling or Dropout.
    C-(BN)-A-(MP)-(D)
    :param int num_filters: Number of filters used for the convolution.
    :param tuple filter_size: Kernel size used for both spatial dimensions.
    :param None/tuple max_pooling: Strides for an optional MaxPooling layer, e.g. (2, 2); None disables it.
    :param string padding: Specifies padding scheme for Conv2D and for MaxPooling. valid/same.
    :param float dropout: Adds a SpatialDropout2D layer if value is greater than 0.
    :param bool BN: Specifies whether to use BatchNormalization or not.
    :param str name: Suffix appended to every layer name in the block.
    :param **kwargs: Additional arguments for calling the Conv2D function.
    :return: x: List of resulting (not yet connected) layers.
    """

    x = []
    if not BN:
        # No BN: fuse the relu directly into the conv layer.
        x.append(
            Conv2D(num_filters,
                   kernel_size=filter_size,
                   padding=padding,
                   activation='relu',
                   name='conv_%s' % (name),
                   **kwargs))
    else:
        # BN sits between the linear conv and the activation.
        channel_axis = -1 if K.image_data_format() == "channels_last" else 1
        x.append(
            Conv2D(num_filters,
                   kernel_size=filter_size,
                   padding=padding,
                   name='conv_%s' % (name),
                   **kwargs))
        x.append(
            BatchNormalization(axis=channel_axis,
                               momentum=0.5,
                               scale=False,
                               name='bn_%s' %
                               (name)))  #momentum=0.99 #TODO use 0.5 again
        x.append(Activation('relu', name='act_%s' % (name)))

    if max_pooling is not None:
        x.append(
            MaxPooling2D(strides=max_pooling,
                         padding=padding,
                         name='maxp_%s' % (name)))
    if dropout > 0.0:
        # x.append(Dropout(dropout, name='drop_%s'%(name)))
        x.append(SpatialDropout2D(dropout, name='drop_%s' % (name)))
    return x
Esempio n. 21
0
def createResidualBlock(input, filters, level):
    """Pre-activation residual block: LReLU-conv-dropout-LReLU-conv + skip.

    ``level`` is accepted but unused here — kept for caller compatibility.
    """
    branch = LeakyReLU(alpha=0.01)(input)
    branch = Conv2D(filters=filters, kernel_size=3, strides=1,
                    padding='same', kernel_initializer='he_normal')(branch)
    branch = SpatialDropout2D(rate=0.3, data_format='channels_last')(branch)
    branch = LeakyReLU(alpha=0.01)(branch)
    branch = Conv2D(filters=filters, kernel_size=3, strides=1,
                    padding='same', kernel_initializer='he_normal')(branch)

    # Skip connection, then a final activation.
    summed = Add()([input, branch])
    return LeakyReLU(alpha=0.01)(summed)
Esempio n. 22
0
 def up_block(n_filters, x, skip):
     """Upsample+merge with the skip, then two conv(/BN/act/dropout) passes."""
     x = upsample_concatenate_like(x, skip)
     for _ in range(2):
         x = Conv2D(n_filters, (3, 3),
                    activation=None,
                    padding="same",
                    kernel_initializer="he_normal")(x)
         if use_batchnorm:
             x = BatchNormalization()(x)
         x = Activation(activation)(x)
         if use_dropout:
             x = SpatialDropout2D(0.25)(x)
     return x
Esempio n. 23
0
def multi_unet(UNET_INPUT,
               dropout_val=0.2,
               batch_norm=False,
               activation='relu'):
    """Two-input U-Net (legacy Keras 1 API: ``mode=``, ``input=``/``output=``).

    The main image branch is downsampled three times; an auxiliary
    (UNET_INPUT/4)-sized 8-channel input is convolved and concatenated into
    the encoder at the third level, then the decoder upsamples back with
    skip connections.  Returns a Model with a single sigmoid mask output.

    NOTE(review): ``activation`` is accepted but never used — the aux
    branch hard-codes 'elu'; confirm with callers.
    """
    main_input = Input(shape=(UNET_INPUT, UNET_INPUT, 3), name='main_input')
    aux_input = Input(shape=(UNET_INPUT // 4, UNET_INPUT // 4, 8),
                      name='aux_input')

    # Encoder: three double-conv levels on the main branch.
    conv1 = double_conv_layer(main_input, 32, dropout_val, batch_norm)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = double_conv_layer(pool1, 64, dropout_val, batch_norm)  #256
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = double_conv_layer(pool2, 128, dropout_val, batch_norm)  #128

    # Auxiliary branch: two conv+BN+elu stages, named as checkpoints.
    aux_conv1 = Conv2D(128, (3, 3), padding='same')(aux_input)
    aux_conv1 = BatchNormalization(mode=0, axis=-1)(aux_conv1)
    aux_conv1 = Activation('elu')(aux_conv1)
    aux_conv1 = Conv2D(128, (3, 3), padding='same',
                       name='checkpoint1')(aux_conv1)
    aux_conv1 = BatchNormalization(mode=0, axis=-1)(aux_conv1)
    aux_conv1 = Activation('elu')(aux_conv1)
    aux_conv1 = Dropout(dropout_val, name='checkpoint2')(aux_conv1)
    #aux_conv1 = Reshape((128, 128, 1))(aux_conv1)

    # Fuse the aux branch into the encoder at level 3.
    branch_concat = concatenate([conv3, aux_conv1], axis=-1)
    pool3 = MaxPooling2D(pool_size=(2, 2))(branch_concat)

    conv4 = double_conv_layer(pool3, 256, dropout_val, batch_norm)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    # Bottleneck.
    conv5 = double_conv_layer(pool4, 512, dropout_val, batch_norm)

    # Decoder: upsample + concatenate the matching encoder level.
    up7 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3)
    conv8 = double_conv_layer(up7, 256, dropout_val, batch_norm)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv3], axis=3)
    conv9 = double_conv_layer(up8, 128, dropout_val, batch_norm)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv9), conv2], axis=3)
    conv10 = double_conv_layer(up9, 64, dropout_val, batch_norm)

    up10 = concatenate([UpSampling2D(size=(2, 2))(conv10), conv1], axis=3)
    # Final level uses dropout 0 inside the double conv.
    conv11 = double_conv_layer(up10, 32, 0, batch_norm)

    #crop1 = Cropping2D(((32 ,32), (32, 32)))(conv11)

    spartial1 = SpatialDropout2D(rate=.25)(conv11)

    # 1x1 sigmoid conv produces the single-channel mask.
    conv12 = Conv2D(1, (1, 1), activation='sigmoid')(spartial1)

    model = Model(input=[main_input, aux_input], output=conv12)
    return model
Esempio n. 24
0
def create_context_module(input_layer,
                          n_level_filters,
                          dropout_rate=0.3,
                          data_format="channels_last"):
    """Context module: conv block -> spatial dropout -> conv block."""
    first = create_convolution_block(input_layer=input_layer,
                                     n_filters=n_level_filters)
    regularized = SpatialDropout2D(rate=dropout_rate,
                                   data_format=data_format)(first)
    return create_convolution_block(input_layer=regularized,
                                    n_filters=n_level_filters)
Esempio n. 25
0
def SpatialDropoutND(x, **kwargs):
    """Choose a function based on input size."""
    # Rank 3/4/5 tensors map to 1D/2D/3D spatial dropout respectively.
    rank = K.ndim(x)
    if rank == 3:
        return SpatialDropout1D(**kwargs)
    if rank == 4:
        return SpatialDropout2D(**kwargs)
    if rank == 5:
        return SpatialDropout3D(**kwargs)
    raise Exception('Unsupported input size.')
Esempio n. 26
0
def CNN_structure(input_shape):
    """Steering-regression CNN (legacy Keras 1 API), compiled with Adam/MSE."""
    model = Sequential()
    # In-graph resize and normalisation to [-0.5, 0.5].
    model.add(Lambda(resize_images, input_shape=input_shape))
    model.add(Lambda(lambda x: x / 255. - 0.5))

    # Three strided 5x5 conv stages; note the border_mode changes at 48.
    for depth, mode in ((24, "same"), (36, "same"), (48, "valid")):
        model.add(
            Convolution2D(depth,
                          5,
                          5,
                          border_mode=mode,
                          subsample=(2, 2),
                          activation="elu"))
        model.add(SpatialDropout2D(0.2))

    # Two unstrided 3x3 conv stages.
    for _ in range(2):
        model.add(Convolution2D(64, 3, 3, border_mode="valid",
                                activation="elu"))
        model.add(SpatialDropout2D(0.2))

    # Dense regression head with dropout at both ends.
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.compile(optimizer=Adam(lr=0.001), loss='mse')
    return model
def model_cifar_5(input_shape=None,
                  keep_prob=0.5,
                  classes=10,
                  r=1e-2,
                  name='model88'):
    """Build a small VGG-style CIFAR classifier with global average pooling.

    NOTE(review): ``keep_prob`` and ``r`` are accepted for interface
    compatibility but never used in this body — confirm with callers.

    Args:
        input_shape: input tensor shape, e.g. (32, 32, 3).
        keep_prob: unused.
        classes: number of softmax output classes.
        r: unused.
        name: model name; also prefixes the input layer name.

    Returns:
        An uncompiled ``Model`` (a summary is printed as a side effect).
    """
    inputs = Input(shape=input_shape, name='Input_' + name)
    features = inputs
    # Three double-conv blocks with rising width and dropout rate.
    for width, drop_rate in ((32, 0.2), (64, 0.3), (128, 0.4)):
        for _ in range(2):
            features = Conv2D(width, (3, 3),
                              activation='relu',
                              kernel_initializer='he_uniform',
                              padding='same')(features)
        features = MaxPooling2D((2, 2))(features)
        features = SpatialDropout2D(drop_rate)(features)
    # Global pooling replaces a flatten+dense stack before the classifier.
    features = GlobalAveragePooling2D()(features)
    prediction = Dense(classes, activation='softmax')(features)
    net = Model(inputs, prediction, name=name)
    net.summary()
    return net
def nVidia_model_v2():
    """Build an (uncompiled) NVIDIA-style steering network for 160x320x3 frames.

    Pipeline: normalize -> crop -> five conv+spatial-dropout stages ->
    dense regression head ending in a single output unit.

    Returns:
        An uncompiled ``Sequential`` model.
    """
    net = Sequential()

    # Normalize pixel values from [0, 255] into [-0.5, 0.5].
    net.add(Lambda(lambda px: px / 255.0 - 0.5, input_shape=(160, 320, 3)))

    # Crop 70 rows off the top, 24 off the bottom, 60 columns off each side.
    net.add(Cropping2D(cropping=((70, 24), (60, 60))))

    # (filters, kernel, padding, strides) for each conv stage.
    conv_specs = (
        (24, (5, 5), "same", (2, 2)),
        (36, (5, 5), "same", (2, 2)),
        (48, (5, 5), "valid", (2, 2)),
        (64, (3, 3), "valid", (1, 1)),
        (64, (3, 3), "valid", (1, 1)),
    )
    for filters, kernel, pad, stride in conv_specs:
        net.add(
            Conv2D(filters,
                   kernel,
                   padding=pad,
                   strides=stride,
                   activation="relu"))
        net.add(SpatialDropout2D(0.2))

    # Regression head.
    net.add(Flatten())
    net.add(Dense(100))
    net.add(Dropout(0.5))
    net.add(Dense(50))
    net.add(Dropout(0.5))
    net.add(Dense(10))
    net.add(Dense(1))

    return net
def model():
    """Build the steering network on top of the project's preProcess() base.

    Returns:
        An uncompiled ``Sequential`` model with a single regression output.
    """
    net = preProcess()
    # Three strided 5x5 conv stages, each followed by spatial dropout.
    for depth in (24, 36, 48):
        net.add(Conv2D(depth, (5, 5), strides=(2, 2), activation='elu'))
        net.add(SpatialDropout2D(0.2))
    # Two unstrided 3x3 conv stages.
    for _ in range(2):
        net.add(Conv2D(64, (3, 3), activation="elu"))
        net.add(SpatialDropout2D(0.2))
    # Dense regression head.
    net.add(Flatten())
    net.add(Dropout(0.5))
    for units in (100, 50, 10):
        net.add(Dense(units))
    net.add(Dropout(0.5))
    net.add(Dense(1))

    return net
# Esempio n. 30 (scrape separator — commented out so the file parses)
# 0
def conv_block(x, params, nb_features):
    """Apply conv3x3 -> optional batch-norm -> LeakyReLU -> optional dropout.

    Args:
        x: input 4D tensor.
        params: dict with keys 'init' (initializer), 'wd' (L2 weight decay),
            'bn' (bool, add BatchNormalization) and 'dropout' (rate; 0 skips).
        nb_features: number of conv filters.

    Returns:
        The transformed tensor.
    """
    weight_init = params['init']
    out = Conv2D(nb_features, (3, 3),
                 activation='linear',
                 padding='same',
                 kernel_initializer=weight_init,
                 bias_initializer=weight_init,
                 kernel_regularizer=l2(params['wd']),
                 bias_regularizer=l2(params['wd']))(x)
    if params['bn']:
        out = BatchNormalization()(out)
    # alpha=0.0 makes this equivalent to a plain ReLU; kept as written.
    out = LeakyReLU(alpha=0.0)(out)
    if params['dropout'] != 0:
        out = SpatialDropout2D(params['dropout'])(out)
    return out