Example #1
def xception_block(inputs, filters, acti_layer, init):
    n = SeparableConv2D(filters, (3, 3), depthwise_initializer=init, pointwise_initializer=init, padding='same')(inputs)
    n = BatchNormalization()(n)
    n = acti_layer(n)
    n = SeparableConv2D(filters, (3, 3), depthwise_initializer=init, pointwise_initializer=init, padding='same')(n)
    n = BatchNormalization()(n)
    return n
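A minimal usage sketch (an editorial addition, not from the original source), assuming a tf.keras environment in which the layer classes used by the block above are already imported:

# Hypothetical usage of xception_block with illustrative arguments.
from tensorflow.keras.layers import Input, Activation
from tensorflow.keras.models import Model

inputs = Input(shape=(64, 64, 3))
x = xception_block(inputs, filters=32,
                   acti_layer=Activation('relu'),  # any tensor-to-tensor callable works here
                   init='he_normal')
Model(inputs, x).summary()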
Example #2
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    # first layer
    x = SeparableConv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size), 
                        strides=(1, 1), padding='same', data_format='channels_last', 
                        dilation_rate=(1, 1), depth_multiplier=2, 
                        activation=conv_actv, use_bias=True,  # conv_actv is assumed to be defined elsewhere in the module
                        depthwise_initializer='glorot_uniform', 
                        pointwise_initializer='glorot_uniform', 
                        bias_initializer='zeros', 
                        depthwise_regularizer=None, pointwise_regularizer=None, #?
                        bias_regularizer=None, activity_regularizer=None, #?
                        depthwise_constraint=None, pointwise_constraint=None, 
                        bias_constraint=None)(input_tensor)
    
    if batchnorm:
        x = BatchNormalization(axis=-1)(x)
    x = Activation("elu")(x)
    # second layer
    x = SeparableConv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size), 
                        strides=(1, 1), padding='same', data_format='channels_last', 
                        dilation_rate=(1, 1), depth_multiplier=2, 
                        activation=conv_actv, use_bias=True,
                        depthwise_initializer='glorot_uniform', 
                        pointwise_initializer='glorot_uniform', 
                        bias_initializer='zeros', 
                        depthwise_regularizer=None, pointwise_regularizer=None, #?
                        bias_regularizer=None, activity_regularizer=None, #?
                        depthwise_constraint=None, pointwise_constraint=None, 
                        bias_constraint=None)(x)
    if batchnorm:
        x = BatchNormalization(axis=-1)(x)
    x = Activation("elu")(x)
    return x
Example #3
def Sconv_block(input_tensor, kernel_size, filters):
    filters1, filters2 = filters
    kernel_size1,kernel_size2 = kernel_size

    x = SeparableConv2D(filters1, kernel_size1, padding='same',use_bias=False)(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters2, kernel_size2, padding='same',use_bias=False)(x)
    x = BatchNormalization()(x)
 
#	residual = Conv2D(filters3, kernel_size3, strides=(2, 2), padding='same', use_bias=False)(input_tensor)
#	residual = BatchNormalization()(residual)
#    
#    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
#	x = layers.add([x, residual])
    
#    x = Conv2D(filters1, kernel_size1,padding='same' )(input_tensor)
#    x = BatchNormalization()(x)
#    x = Activation('relu')(x)
#
#    x = Conv2D(filters2, kernel_size2,padding='same' )(x)
#    x = BatchNormalization()(x)
#    x = Activation('relu')(x)
#
#    x = Conv2D(filters3, kernel_size3,padding='same')(x)
#    x = BatchNormalization()(x)

#    x = layers.add([x, input_tensor])
#    x = Activation('relu')(x)
    return x
Example #4
def xceptionMiddleFlow(input_tensor, block):
    residual = input_tensor
    prefix = 'MF_block{}'.format(block)

    x = Activation('relu', name=prefix + '_sepconv1_act')(input_tensor)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name=prefix + '_sepconv1')(x)
    x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
    x = Activation('relu', name=prefix + '_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name=prefix + '_sepconv2')(x)
    x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
    x = Activation('relu', name=prefix + '_sepconv3_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name=prefix + '_sepconv3')(x)
    x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

    x = layers.add([x, residual])

    return x
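For context (an addition, not from the source): in Xception the middle flow repeats this residual block several times on a 728-channel feature map. A hedged stacking sketch, assuming `layers` and the layer classes above are imported from keras:

# Hypothetical stacking of the middle-flow block; Xception-41 repeats it 8 times.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(19, 19, 728))  # the residual add() requires a 728-channel input
x = inputs
for block in range(1, 9):
    x = xceptionMiddleFlow(x, block)
middle_flow = Model(inputs, x)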
Example #5
def entry_flow_resblock(x,
                        first_activation=True,
                        filter_num=128,
                        kernel_size=(3, 3),
                        use_bias=False,
                        name='entry_flow_resblock_'):

    residual = Conv2D(filter_num, (1, 1), strides=(2, 2), use_bias=use_bias)(x)
    residual = BatchNormalization()(residual)

    if first_activation:
        x = Activation('relu')(x)

    x = SeparableConv2D(filter_num,
                        kernel_size,
                        padding='same',
                        use_bias=use_bias,
                        name=name + 'sepconv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filter_num,
                        kernel_size,
                        padding='same',
                        use_bias=use_bias,
                        name=name + 'sepconv2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(kernel_size,
                     strides=(2, 2),
                     padding='same',
                     name=name + 'maxpool')(x)
    x = layers.add([x, residual])

    return x
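Worth noting (editorial addition): the strided 1x1 convolution on the residual branch mirrors the strided MaxPooling2D on the main branch, so both branches have matching shapes before the add. A hedged shape check with illustrative dimensions:

# Hypothetical shape check for entry_flow_resblock.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(64, 64, 64))
x = entry_flow_resblock(inputs, filter_num=128)
print(Model(inputs, x).output_shape)  # (None, 32, 32, 128): both branches halved and widened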
Example #6
    def shallow_net(self, x):
        """
        x = Conv2D(32, (3, 3), strides = (2, 2), activation = 'relu', padding = 'same', name = 'sep_conv1', data_format = 'channels_last')(x)
        x = SeparableConv2D(64, (3, 3), strides = (2, 2), activation = 'relu', padding = 'same', name = 'sep_conv2')(x)
        x = SeparableConv2D(128, (3, 3), strides = (2, 2), activation = 'relu', padding = 'same', name = 'sep_conv3')(x)
        x = SeparableConv2D(128, (3, 3), activation = 'relu', padding = 'same', name = 'sep_conv4')(x)
        """
        x = Conv2D(32, (3, 3),
                   strides=(2, 2),
                   padding='same',
                   name='sep_conv1',
                   data_format='channels_last')(x)
        x = BatchNormalization()(x)
        x = Activation(relu6)(x)

        x = SeparableConv2D(64, (3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='sep_conv2')(x)
        x = BatchNormalization()(x)
        x = Activation(relu6)(x)

        x = SeparableConv2D(128, (3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='sep_conv3')(x)
        x = BatchNormalization()(x)
        x = Activation(relu6)(x)

        x = SeparableConv2D(128, (3, 3), padding='same', name='sep_conv4')(x)
        x = BatchNormalization()(x)
        x = Activation(relu6)(x)

        return x
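relu6 is a free name in the snippet above; a hedged guess is a ReLU capped at 6, as used in MobileNet, for example:

# One possible definition of the relu6 activation assumed by the snippet above.
import tensorflow as tf

def relu6(x):
    return tf.nn.relu6(x)  # equivalent to min(max(x, 0), 6)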
Example #7
def make_prev_match_cur_layer(ip_prev, desired_spatial_dimen, desired_channels, 
                weight_decay=DEFAULT_WEIGHT_DECAY):
    ''' Simple helper to ensure that both hidden layer inputs have the same dimensions.
    Args:
        ip_prev: tuple of input keras tensor from previous block and flag for input image
        desired_spatial_dimen: output spatial dimensions
        desired_channels: output channels dimensions
        weight_decay: weight decay factor
        
    Returns: tuple of (keras tensor with either the original or new/adjusted "prev" layer, updated input-image flag)
    '''
    
    # Get channel axis
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    
    
    # Get shapes of previous layer
    prev_layer, prev_is_input_image = ip_prev
    prev_spatial_dimen, prev_channels = layer_into_spatial_and_channels(prev_layer)
    
    
    # Determine necessary adjustments
    prev_need_spatial_adjust = False
    prev_need_channel_adjust = False
    
    if prev_spatial_dimen != desired_spatial_dimen:
        prev_need_spatial_adjust = True

    if prev_channels != desired_channels:
        prev_need_channel_adjust = True
    
    
    # Make adjustments
    cur_input_image_val = prev_is_input_image
    if prev_need_spatial_adjust or prev_need_channel_adjust:
        # Set input image flag to false
        cur_input_image_val = False
        
        # Put old layer through necessary 1x1 convolution
        if not prev_is_input_image:
            prev_layer = Activation('relu')(prev_layer)
        
        if prev_need_spatial_adjust:
            #prev_layer = Conv2D(desired_channels, (1, 1), strides=(2,2), kernel_initializer='he_uniform', 
            prev_layer = SeparableConv2D(desired_channels, (3, 3), strides=(2,2), kernel_initializer='he_uniform', 
                                    padding='same', use_bias=True, 
                                    kernel_regularizer=l2(weight_decay))(prev_layer)
        else:
            #prev_layer = Conv2D(desired_channels, (1, 1), kernel_initializer='he_uniform', 
            prev_layer = SeparableConv2D(desired_channels, (3, 3), kernel_initializer='he_uniform', 
                                    padding='same', use_bias=True, 
                                    kernel_regularizer=l2(weight_decay))(prev_layer)        

        prev_layer = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                                            beta_regularizer=l2(weight_decay))(prev_layer)
                                            
                                            
    # Return adjusted layer
    return (prev_layer, cur_input_image_val)
Example #8
def xception_41(input_shape,
                weight_decay=1e-4,
                kernel_initializer="he_normal",
                bn_epsilon=1e-3,
                bn_momentum=0.99):
    """
    :param input_shape: tuple, i.e., (height, width, channel).
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: a Keras Model instance.
    """
    input_x = Input(shape=input_shape)
    x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(input_x)

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='entry_block1_conv1', padding="same",
               kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='entry_block1_conv1_bn', epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Activation('relu', name='entry_block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='entry_block1_conv2', padding="same",
               kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='entry_block1_conv2_bn', epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Activation('relu', name='entry_block1_conv2_act')(x)

    x = separable_residual_block(x, [128, 128, 128], "entry_block2", skip_type="conv", stride=2, rate=1,
                               weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                               bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    x = separable_residual_block(x, [256, 256, 256], "entry_block3", skip_type="conv", stride=2, rate=1,
                               weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                               bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    x = separable_residual_block(x, [728, 728, 728], "entry_block4", skip_type="conv", stride=2, rate=1,
                               weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                               bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)

    for i in range(1, 17):
        x = separable_residual_block(x, [728, 728, 728], "middle_block"+str(i), skip_type="sum", stride=1, rate=1,
                                   weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                                   bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)

    x = separable_residual_block(x, [728, 1024, 1024], "exit_block1", skip_type="conv", stride=1, rate=1,
                               weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                               bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    x = SeparableConv2D(1536, (3, 3), strides=1, dilation_rate=2, use_bias=False, padding="same",
                        activation="relu", name="exit_block2_sepconv1",
                        kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
    x = BatchNormalization(name="exit_block2_sepconv1_bn", epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = SeparableConv2D(1536, (3, 3), strides=1, dilation_rate=2, use_bias=False, padding="same",
                        activation="relu", name="exit_block2_sepconv2",
                        kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
    x = BatchNormalization(name="exit_block2_sepconv2_bn", epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = SeparableConv2D(2048, (3, 3), strides=1, dilation_rate=2, use_bias=False, padding="same",
                        activation="relu", name="exit_block2_sepconv3",
                        kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
    x = BatchNormalization(name="exit_block2_sepconv3_bn", epsilon=bn_epsilon, momentum=bn_momentum)(x)

    return Model(input_x, x)
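A hedged instantiation sketch; it assumes separable_residual_block (shown in Example #10 below) and the usual keras imports are available in the same module:

# Hypothetical usage of the Xception-41 backbone defined above.
model = xception_41(input_shape=(299, 299, 3))
model.summary()  # overall output stride is 16: one stride-2 stem conv plus three stride-2 entry blocks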
Example #9
def EEGNet_SSVEP(nb_classes, Chans = 64, Samples = 128, regRate = 0.0001,
           dropoutRate = 0.25, kernLength = 64, numFilters = 8):
    """ Keras Implementation of the variant of EEGNet that was used to classify
    signals from an SSVEP task (https://arxiv.org/abs/1803.04566)

       
    Inputs:
        
        nb_classes     : int, number of classes to classify
        Chans, Samples : number of channels and time points in the EEG data
        regRate        : regularization parameter for L1 and L2 penalties
        dropoutRate    : dropout fraction
        kernLength     : length of temporal convolution in first layer
        numFilters     : number of temporal-spatial filter pairs to learn
    
    """

    input1   = Input(shape = (1, Chans, Samples))

    ##################################################################
    layer1       = Conv2D(numFilters, (1, kernLength), padding = 'same',
                          kernel_regularizer = l1_l2(l1=0.0, l2=0.0),
                          input_shape = (1, Chans, Samples),
                          use_bias = False)(input1)
    layer1       = BatchNormalization(axis = 1)(layer1)
    layer1       = DepthwiseConv2D((Chans, 1), 
                              depthwise_regularizer = l1_l2(l1=regRate, l2=regRate),
                              use_bias = False)(layer1)
    layer1       = BatchNormalization(axis = 1)(layer1)
    layer1       = Activation('elu')(layer1)
    layer1       = SpatialDropout2D(dropoutRate)(layer1)
    
    layer2       = SeparableConv2D(numFilters, (1, 8), 
                              depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                              use_bias = False, padding = 'same')(layer1)
    layer2       = BatchNormalization(axis=1)(layer2)
    layer2       = Activation('elu')(layer2)
    layer2       = AveragePooling2D((1, 4))(layer2)
    layer2       = SpatialDropout2D(dropoutRate)(layer2)
    
    layer3       = SeparableConv2D(numFilters*2, (1, 8), depth_multiplier = 2,
                              depthwise_regularizer=l1_l2(l1=0.0, l2=regRate), 
                              use_bias = False, padding = 'same')(layer2)
    layer3       = BatchNormalization(axis=1)(layer3)
    layer3       = Activation('elu')(layer3)
    layer3       = AveragePooling2D((1, 4))(layer3)
    layer3       = SpatialDropout2D(dropoutRate)(layer3)
    
    
    flatten      = Flatten(name = 'flatten')(layer3)
    
    dense        = Dense(nb_classes, name = 'dense')(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input1, outputs=softmax)
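The (1, Chans, Samples) input and axis=1 batch normalization imply a channels-first layout; a hedged usage sketch (the data-format call and the hyperparameter values are assumptions):

# Hypothetical usage of EEGNet_SSVEP.
from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')  # required by the (1, Chans, Samples) input layout
model = EEGNet_SSVEP(nb_classes=12, Chans=8, Samples=256)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()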
Example #10
def separable_residual_block(inputs,
                           n_filters_list=[256, 256, 256],
                           block_id="entry_block2",
                           skip_type="sum",
                           stride=1,
                           rate=1,
                           weight_decay=1e-4,
                           kernel_initializer="he_normal",
                           bn_epsilon=1e-3,
                           bn_momentum=0.99):
    """ separable residual block
    :param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
    :param n_filters_list: list of int, numbers of filters in the separable convolutions, default [256, 256, 256].
    :param block_id: string, default "entry_block2".
    :param skip_type: string, one of {"sum", "conv", "none"}, default "sum".
    :param stride: int, default 1.
    :param rate: int, default 1.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: 4-D tensor, shape of (batch_size, height, width, channel).
    """
    x = Activation("relu", name=block_id+"_sepconv1_act")(inputs)
    x = SeparableConv2D(n_filters_list[0], (3, 3), padding='same', use_bias=False,
                        name=block_id+'_sepconv1', dilation_rate=rate,
                        kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name=block_id+'_sepconv1_bn', epsilon=bn_epsilon, momentum=bn_momentum)(x)

    x = Activation('relu', name=block_id+'_sepconv2_act')(x)
    x = SeparableConv2D(n_filters_list[1], (3, 3), padding='same', use_bias=False,
                        name=block_id+'_sepconv2', dilation_rate=rate,
                        kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name=block_id+'_sepconv2_bn', epsilon=bn_epsilon, momentum=bn_momentum)(x)

    x = Activation("relu", name=block_id+"_sepconv3_act")(x)
    x = SeparableConv2D(n_filters_list[2], (3, 3), padding="same", use_bias=False,
                        strides=stride, name=block_id+"_sepconv3", dilation_rate=rate,
                        kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name=block_id+"_sepconv3_bn", epsilon=bn_epsilon, momentum=bn_momentum)(x)

    if skip_type=="sum":
        x = Add(name=block_id+"_add")([inputs, x])
    elif skip_type=="conv":
        shortcut = Conv2D(n_filters_list[2], (1, 1), strides=stride, padding='same', use_bias=False,
                          kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(inputs)
        shortcut = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(shortcut)
        x = Add(name=block_id+"_add")([shortcut, x])
    # skip_type == "none": no skip connection, x is returned unchanged

    return x
Example #11
 def _res_func(x):
     identity = Cropping2D(cropping=((2, 2), (2, 2)))(x)
     a = SeparableConv2D(nb_filter, (nb_row, nb_col),
                         strides=stride,
                         padding='valid')(x)
     a = BatchNormalization()(a)
     a = Activation("relu")(a)
     a = SeparableConv2D(nb_filter, (nb_row, nb_col),
                         strides=stride,
                         padding='valid')(a)
     y = BatchNormalization()(a)
     return add([identity, y])
Example #12
def EEGNet2(nb_classes=2,
            Chans=64,
            Samples=64,
            regRate=0.001,
            dropoutRate=0.25,
            kernLength=64,
            numFilters=8):

    input_conv = Input(shape=(1, Chans, Samples))

    conv_block1 = Conv2D(numFilters, (1, kernLength),
                         padding='same',
                         kernel_regularizer=l1_l2(l1=0.0, l2=0.0),
                         input_shape=(1, Chans, Samples),
                         use_bias=False)(input_conv)
    conv_block1 = BatchNormalization(axis=1)(conv_block1)
    conv_block1 = DepthwiseConv2D((Chans, 1),
                                  depthwise_regularizer=l1_l2(l1=regRate,
                                                              l2=regRate),
                                  use_bias=False)(conv_block1)
    conv_block1 = BatchNormalization(axis=1)(conv_block1)
    conv_block1 = Activation('elu')(conv_block1)
    conv_block1 = SpatialDropout2D(dropoutRate)(conv_block1)

    conv_block2 = SeparableConv2D(numFilters, (1, 8),
                                  depthwise_regularizer=l1_l2(l1=0.0,
                                                              l2=regRate),
                                  use_bias=False,
                                  padding='same')(conv_block1)
    conv_block2 = BatchNormalization(axis=1)(conv_block2)
    conv_block2 = Activation('elu', name='elu_2')(conv_block2)
    conv_block2 = AveragePooling2D((1, 4))(conv_block2)
    conv_block2 = SpatialDropout2D(dropoutRate, name='drop_2')(conv_block2)

    conv_block3 = SeparableConv2D(numFilters * 2, (1, 8),
                                  depth_multiplier=2,
                                  depthwise_regularizer=l1_l2(l1=0.0,
                                                              l2=regRate),
                                  use_bias=False,
                                  padding='same')(conv_block2)
    conv_block3 = BatchNormalization(axis=1)(conv_block3)
    conv_block3 = Activation('elu', name='elu_3')(conv_block3)
    conv_block3 = AveragePooling2D((1, 4))(conv_block3)
    conv_block3 = SpatialDropout2D(dropoutRate, name='drop_3')(conv_block3)

    flatten_layer = Flatten(name='flatten')(conv_block3)

    dense_layer = Dense(nb_classes, name='dense')(flatten_layer)
    out_put = Activation('softmax', name='softmax')(dense_layer)

    return Model(inputs=input_conv, outputs=out_put)
Example #13
def grouped_SeperableConvolution_block(input,
                                       grouped_channels,
                                       cardinality,
                                       strides,
                                       weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = SeparableConv2D(grouped_channels, (3, 3),
                            padding='same',
                            use_bias=False,
                            strides=(strides, strides),
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # select the channels that belong to group c
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last'
                   else z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = SeparableConv2D(grouped_channels, (3, 3),
                            padding='same',
                            use_bias=False,
                            strides=(strides, strides),
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)

    return x
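The Lambda slices the input into cardinality groups of grouped_channels channels each, so the block expects grouped_channels * cardinality input channels. A hedged sketch with hypothetical shapes, assuming the backend (K), l2 and concatenate imports used throughout these examples:

# Hypothetical usage: 64 input channels split into 4 groups of 16 channels each.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(32, 32, 64))
x = grouped_SeperableConvolution_block(inputs, grouped_channels=16, cardinality=4, strides=1)
Model(inputs, x).summary()  # the concatenated groups give 64 output channels again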
Example #14
    def encoder_model(self, architecture):
        model = Sequential()
        if architecture == 'EEGNet':
            model.add(Conv2D(8, (1, 32), padding='same', use_bias=False))
            model.add(BatchNormalization(axis=3))
            model.add(
                DepthwiseConv2D((self.chans, 1),
                                use_bias=False,
                                depth_multiplier=2,
                                depthwise_constraint=max_norm(1.)))
            model.add(BatchNormalization(axis=3))
            model.add(Activation('elu'))
            model.add(AveragePooling2D((1, 4)))
            model.add(Dropout(0.25))
            model.add(
                SeparableConv2D(16, (1, 16), use_bias=False, padding='same'))
            model.add(BatchNormalization(axis=3))
            model.add(Activation('elu'))
            model.add(AveragePooling2D((1, 8)))
            model.add(Dropout(0.25))
            model.add(Flatten())
        elif architecture == 'DeepConvNet':
            model.add(Conv2D(25, (1, 5)))
            model.add(Conv2D(25, (self.chans, 1), use_bias=False))
            model.add(BatchNormalization(axis=3, epsilon=1e-05, momentum=0.1))
            model.add(Activation('elu'))
            model.add(MaxPooling2D(pool_size=(1, 2), strides=(1, 2)))
            model.add(Dropout(0.5))
            model.add(Conv2D(50, (1, 5), use_bias=False))
            model.add(BatchNormalization(axis=3, epsilon=1e-05, momentum=0.1))
            model.add(Activation('elu'))
            model.add(MaxPooling2D(pool_size=(1, 2), strides=(1, 2)))
            model.add(Dropout(0.5))
            model.add(Conv2D(100, (1, 5), use_bias=False))
            model.add(BatchNormalization(axis=3, epsilon=1e-05, momentum=0.1))
            model.add(Activation('elu'))
            model.add(MaxPooling2D(pool_size=(1, 2), strides=(1, 2)))
            model.add(Dropout(0.5))
            model.add(Conv2D(200, (1, 5), use_bias=False))
            model.add(BatchNormalization(axis=3, epsilon=1e-05, momentum=0.1))
            model.add(Activation('elu'))
            model.add(MaxPooling2D(pool_size=(1, 2), strides=(1, 2)))
            model.add(Dropout(0.5))
            model.add(Flatten())
        elif architecture == 'ShallowConvNet':
            model.add(Conv2D(40, (1, 13)))
            model.add(Conv2D(40, (self.chans, 1), use_bias=False))
            model.add(BatchNormalization(axis=3, epsilon=1e-05, momentum=0.1))
            model.add(Activation(lambda x: tf.square(x)))
            model.add(AveragePooling2D(pool_size=(1, 35), strides=(1, 7)))
            model.add(
                Activation(lambda x: tf.math.log(
                    tf.clip_by_value(x, 1e-7, 10000))))
            model.add(Dropout(0.5))
            model.add(Flatten())

        input = Input(shape=(self.chans, self.samples, 1))
        latent = model(input)

        return Model(input, latent, name='enc')
Example #15
    def feature_fusion_unit(self, input_tensor1, input_tensor2):

        input1 = Conv2D(128, (1, 1),
                        strides=(1, 1),
                        padding='same',
                        name='unit_conv1')(input_tensor1)
        input2 = UpSampling2D((4, 4))(input_tensor2)
        #input2 = SeparableConv2D(128, (3, 3), strides = (1, 1),  dilation_rate=(4, 4), activation = 'relu', padding = 'same')(input2)
        input2 = SeparableConv2D(128, (3, 3),
                                 strides=(1, 1),
                                 dilation_rate=(4, 4),
                                 padding='same')(input2)
        input2 = BatchNormalization()(input2)
        input2 = Activation(relu6)(input2)

        input2 = Conv2D(128, (1, 1),
                        strides=(1, 1),
                        padding='same',
                        name='unit_conv2')(input2)
        input_tensors = add([input1, input2])

        result = Conv2D(self.n_labels, (1, 1),
                        strides=(1, 1),
                        activation='softmax',
                        padding='same',
                        name='conv_last')(input_tensors)

        return result
Example #16
def EEGNet(input_layer, F1=4, kernLength=64, D=2, Chans=22, dropout=0.1):
    F2 = F1 * D
    block1 = Conv2D(F1, (kernLength, 1),
                    padding='same',
                    data_format='channels_last',
                    use_bias=False)(input_layer)
    block1 = BatchNormalization(axis=-1)(block1)
    block2 = DepthwiseConv2D((1, Chans),
                             use_bias=False,
                             depth_multiplier=D,
                             data_format='channels_last',
                             depthwise_constraint=max_norm(1.))(block1)
    block2 = BatchNormalization(axis=-1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((8, 1), data_format='channels_last')(block2)
    block2 = Dropout(dropout)(block2)
    block3 = SeparableConv2D(F2, (16, 1),
                             data_format='channels_last',
                             use_bias=False,
                             padding='same')(block2)
    block3 = BatchNormalization(axis=-1)(block3)
    block3 = Activation('elu')(block3)
    block3 = AveragePooling2D((8, 1), data_format='channels_last')(block3)
    block3 = Dropout(dropout)(block3)
    return block3
Example #17
def CC_multiscale_Resnet_2(input_shape):
	with tf.device('/gpu:0'):
		ip = Input(shape=input_shape)		
		conv3_3 = Conv2D(32,(3,3), dilation_rate = 1, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal',kernel_regularizer= regularizers.l2(l2))(ip)
		conv5_5 = Conv2D(32,(3,3), dilation_rate = 2, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal',kernel_regularizer= regularizers.l2(l2))(ip)
		conv7_7 = Conv2D(32,(3,3), dilation_rate = 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal',kernel_regularizer= regularizers.l2(l2))(ip)
		conv9_9 = Conv2D(32,(3,3), dilation_rate = 4, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal',kernel_regularizer= regularizers.l2(l2))(ip)		
		concat_1 = concatenate([conv3_3,conv5_5,conv7_7,conv9_9],axis = 3)
		dpt_conv = SeparableConv2D(128,(3,3),padding = 'same',activation = 'relu',kernel_initializer = 'he_normal')(concat_1)
		conv_1 = _conv_bn_relu(32, 3, 3, subsample=(1, 1),dilation = 1)(dpt_conv) # tap here
		max_pool_1 = MaxPooling2D(pool_size=(2,2),strides =(2,2))(conv_1)
		res_blk_1 = residual_units(32,init_subsample=(1, 1),dilation = 1)(max_pool_1)
		res_blk_2 = residual_units(32,init_subsample=(1, 1),dilation = 1)(res_blk_1)
		#res_blk_3 = residual_units(64,init_subsample=(1, 1),dilation = 1)(res_blk_2)
		res_blk_4 = residual_units(32,init_subsample=(1, 1),dilation = 1)(res_blk_2) # tap here
		max_pool_2 = MaxPooling2D(pool_size=(2,2),strides = (2,2))(res_blk_4)
		res_blk_5 = residual_units(64,init_subsample=(1, 1),dilation = 2)(max_pool_2)
		res_blk_6 = residual_units(64,init_subsample=(1, 1),dilation = 2)(res_blk_5)
		res_blk_7 = residual_units(64,init_subsample=(1, 1),dilation = 2)(res_blk_6)
		res_blk_8 = residual_units(64,init_subsample=(1, 1),dilation = 2)(res_blk_7)
		
		# Density map estimation
		#dm_upsample_1 = tf.image.resize_images(res_blk_8,[150,150])
		dm_upsample_1 = Lambda(resize_like,arguments={'H':150,'W':150},name = 'Lambda_1')(res_blk_8)
		dm_1_conv_1_1 = Conv2D(64,(1,1),padding = 'valid', kernel_initializer = 'he_normal')(res_blk_4)
		dm_add_1 = add([dm_upsample_1,dm_1_conv_1_1])
		dm_res_blk_1 = residual_units(64,init_subsample=(1, 1),dilation = 1)(dm_add_1)
		#dm_upsample_2 = tf.image.resize_images(dm_res_blk_1,[300,300])
		dm_upsample_2 = Lambda(resize_like,arguments={'H':300,'W':300}, name = 'Lambda_2')(dm_res_blk_1)
		dm_2_conv_1_1 = Conv2D(64,(1,1),padding = 'valid', kernel_initializer = 'he_normal')(conv_1)
		dm_add_2 = add([dm_upsample_2,dm_2_conv_1_1])
		dm_out = Conv2D(1,(1,1),kernel_initializer = 'he_normal')(dm_add_2)
		
		model = Model(inputs = [ip],outputs = [dm_out])
		return model		
Example #18
def get_squeezenet_on_tif(input_shape, n_classes, **params):
    """
    """
    optimizer = '' if 'optimizer' not in params else params['optimizer']
    lr = 0.01 if 'lr' not in params else params['lr']
    loss = '' if 'loss' not in params else params['loss']
    final_activation = 'sigmoid'

    inputs = Input(input_shape)

    x = SeparableConv2D(64, (3, 3),
                        strides=(2, 2),
                        use_bias=False,
                        padding='valid',
                        name='conv1')(inputs)
    x = Activation('relu', name='relu_conv1')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

    x = fire_module(x, fire_id=6, squeeze=48, expand=192)
    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256)
    x = Dropout(0.5, name='drop9')(x)

    name = 'conv10_%i' % n_classes
    x = Convolution2D(n_classes, (1, 1), padding='valid', name=name)(x)
    x = Activation('relu', name='relu_conv10')(x)

    x = Flatten()(x)
    x = Dense(96, activation='relu', name='d1')(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, name='d2')(x)

    outputs = Activation(final_activation, name='tag_vector')(x)
    model = Model(inputs=inputs, outputs=outputs)

    model.name = "Separable_SqueezeNet_BN_on_tif"

    if optimizer == 'adadelta':
        opt = Adadelta(lr=lr)
    elif optimizer == 'adam':
        opt = Adam(lr=lr)
    elif optimizer == 'nadam':
        opt = Nadam(lr=lr)
    elif optimizer == 'sgd':
        opt = SGD(lr=lr, momentum=0.9, decay=0.00001, nesterov=True)
    else:
        opt = None

    if opt is not None:
        model.compile(loss=loss, optimizer=opt, metrics=[precision, recall])
    return model
Example #19
def DepthwiseSeparableConvBlock(inputs,
                                n_filters,
                                weight_decay=1e-4,
                                kernel_initializer="he_normal",
                                bn_epsilon=1e-3,
                                bn_momentum=0.99):
    """ Depthwise separable convolutional block
    :param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
    :param n_filters: int, number of filters.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: 4-D tensor, shape of (batch_size, height, width, channel).
    """
    # depthwise 3x3 convolution; the 1x1 pointwise projection follows below
    x = DepthwiseConv2D((3, 3),
                        activation=None,
                        padding="same",
                        depth_multiplier=1,
                        depthwise_regularizer=l2(weight_decay),
                        depthwise_initializer=kernel_initializer)(inputs)
    x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Activation("relu")(x)
    x = Conv2D(n_filters, (1, 1),
               activation=None,
               kernel_regularizer=l2(weight_decay),
               kernel_initializer=kernel_initializer)(x)
    x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Activation("relu")(x)
    return x
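A hedged usage sketch for the block above (a depthwise 3x3 convolution followed by a 1x1 pointwise projection, in the MobileNet style), with illustrative shapes:

# Hypothetical usage of DepthwiseSeparableConvBlock.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(128, 128, 32))
x = DepthwiseSeparableConvBlock(inputs, n_filters=64)  # spatial size preserved, channels projected to 64
Model(inputs, x).summary()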
Example #20
def depthwise_conv_layer(inputs,
                         name,
                         n_kernels=32,
                         kernel_size=(3, 3),
                         dropout=0.0,
                         dilation_rate=1,
                         padding='same',
                         depth_multiplier=1,
                         enable_transition=False,
                         transition_layer_kernels=32,
                         strides=1,
                         spatial_dropout=0.0,
                         bn=True,
                         bn_zero_gamma=False):
    inputs = concat_s2d(inputs)
    inputs = transition_layer(
        inputs, name +
        "_tran", transition_layer_kernels) if enable_transition else inputs
    out = SeparableConv2D(n_kernels,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          kernel_regularizer=l2(1e-4),
                          dilation_rate=dilation_rate,
                          depth_multiplier=depth_multiplier,
                          name=name + "sep-conv_")(inputs)
    if bn:
        out = BatchNormalization(name=name + "bn_", gamma_initializer='zeros')(
            out) if bn_zero_gamma else BatchNormalization(name=name +
                                                          "bn_")(out)
    out = Activation("relu", name=name + "activation_")(out)
    out = Dropout(dropout, name=name + "dropout_")(out) if dropout > 0 else out
    out = SpatialDropout2D(spatial_dropout)(
        out) if spatial_dropout > 0 else out
    return out
Example #21
def Discriminator(input_shape, generator_shape, kernel_depth, kernel_size=5):
    real_input = Input(shape=input_shape)
    generator_input = Input(shape=generator_shape)    
    input = Concatenate()([real_input, generator_input])
   
    conv_seperable = TimeDistributed(SeparableConv2D(kernel_depth, kernel_size, padding='same'))(input)
    conv_seperable = Activation('tanh')(conv_seperable)
    conv_seperable = TimeDistributed(BatchNormalization())(conv_seperable)

    conv_128 = conv_layer(conv_seperable, kernel_depth, kernel_size)
    pool_64 = TimeDistributed(MaxPooling2D())(conv_128)

    conv_64 = conv_layer(pool_64, 2 * kernel_depth, kernel_size)
    pool_32 = TimeDistributed(MaxPooling2D())(conv_64)

    conv_32 = conv_layer(pool_32, 2 * kernel_depth, kernel_size)
    pool_16 = TimeDistributed(MaxPooling2D())(conv_32)

    conv_16 = conv_layer(pool_16, 4 * kernel_depth, kernel_size)
    pool_8 = TimeDistributed(MaxPooling2D())(conv_16)

    conv_8 = lstm_layer(pool_8, 4 * kernel_depth, kernel_size)
    
    x = Flatten()(conv_8)
    x = Dense(2, activation="softmax")(x)
    
    model = Model([real_input, generator_input], x, name="Discriminator")
    return model
Example #22
def __regular_residual_op(x, num_filters, weight_decay=1E-4):
    ''' Apply ReLU, 1x1 Conv2D, BN, ReLU, 3x3 SeparableConv2D, BN for the residual output
    Args:
        x: keras input tensor
        num_filters: number of filters
        weight_decay: weight decay factor
        
    Returns: keras output tensor (representing only residual term)
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Activation('relu')(x)
    x = Conv2D(int(num_filters // 2), (1, 1),
               kernel_initializer='he_uniform',
               padding='same',
               use_bias=True,
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)

    x = Activation('relu')(x)
    x = SeparableConv2D(num_filters, (3, 3),
                        kernel_initializer='he_uniform',
                        padding='same',
                        use_bias=True,
                        kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)

    return x
Example #23
def __conv_block(ip,
                 nb_filter,
                 smaller_filters=False,
                 weight_decay=DEFAULT_WEIGHT_DECAY):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D to reduce channels, then two distinct separable conv ops
    Args:
        ip: input keras tensor
        nb_filter: final number of filters to output
        smaller_filters: flag for triggering a 2x2 separable conv (instead of a 5x5 one)
        weight_decay: weight decay factor

    Returns: keras tensor after applying the block with two filter sizes
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if smaller_filters:
        alt_filter_size = 2
    else:
        alt_filter_size = 5

    # Reduce channel size of the input layer, but more so, add a non-linearity and combine all features
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter // 3), (1, 1),
               kernel_initializer='he_uniform',
               padding='same',
               use_bias=True,
               kernel_regularizer=l2(weight_decay))(x)

    # In the vein of NAS convolutional cells, perform two different sized separable convolutions and add them
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x3by3 = SeparableConv2D(nb_filter, (3, 3), padding='same',
                            use_bias=True)(x)
    x_other = SeparableConv2D(nb_filter, (alt_filter_size, alt_filter_size),
                              padding='same',
                              use_bias=True)(x)

    output = add([x3by3, x_other])

    # Return output layer
    return output
Example #24
    def residual_model(data,
                       kernels,
                       strides,
                       chanDim,
                       reduced=False,
                       reg=0.0001,
                       epsilon=2e-5,
                       mom=0.9):
        shortcut = data

        bn1 = BatchNormalization(axis=chanDim, epsilon=epsilon,
                                 momentum=mom)(data)
        act1 = Activation("relu")(bn1)
        conv1 = SeparableConv2D(kernels, (3, 3),
                                padding='same',
                                strides=strides,
                                use_bias=False,
                                depthwise_regularizer=l2(reg))(act1)

        bn2 = BatchNormalization(axis=chanDim, epsilon=epsilon,
                                 momentum=mom)(conv1)
        act2 = Activation("relu")(bn2)
        conv2 = SeparableConv2D(kernels, (3, 3),
                                padding='same',
                                strides=strides,
                                use_bias=False,
                                depthwise_regularizer=l2(reg))(act2)

        input_shape = K.int_shape(data)
        residual_shape = K.int_shape(conv2)
        stride_width = int(round(input_shape[1] / residual_shape[1]))
        stride_height = int(round(input_shape[2] / residual_shape[2]))
        equal_channels = input_shape[3] == residual_shape[3]

        # 1x1 conv on the pre-activated input if the shape is different, else identity (shortcut = data)
        if stride_width > 1 or stride_height > 1 or not equal_channels:
            shortcut = Conv2D(filters=residual_shape[3],
                              kernel_size=(1, 1),
                              strides=(stride_width, stride_height),
                              padding="valid",
                              kernel_initializer="he_normal",
                              kernel_regularizer=l2(0.0001))(act1)

        x = add([conv2, shortcut])

        return x
Example #25
def SynapticNeuronUnit(dendrites, filter_size, kernel_size, CRP, d_rate,
                       use_STR):

    if CRP[1] == 'UpSampling':
        dendrites = UpSampling2D(interpolation='bilinear')(dendrites)

    # Synaptic Transmission Regulator, STR, calculates weight and bias for each channel of input tensor
    if use_STR: neuro_potential = SynapticTransmissionRegulator()(dendrites)
    else: neuro_potential = dendrites

    # Main neural potential
    if CRP[0] == 'Normal':
        neuro_potential = Conv2D(filters=filter_size,
                                 kernel_size=kernel_size,
                                 padding=CRP[2],
                                 kernel_initializer='he_uniform',
                                 use_bias=False)(neuro_potential)

    elif CRP[0] == 'Transpose':
        neuro_potential = Conv2DTranspose(filters=filter_size,
                                          kernel_size=kernel_size,
                                          padding=CRP[2],
                                          kernel_initializer='he_uniform',
                                          use_bias=False)(neuro_potential)

    elif CRP[0] == 'Separable':
        neuro_potential = SeparableConv2D(filters=filter_size,
                                          kernel_size=kernel_size,
                                          padding=CRP[2],
                                          depthwise_initializer='he_uniform',
                                          pointwise_initializer='he_uniform',
                                          use_bias=False)(neuro_potential)

    elif CRP[0] == 'Atrous':
        neuro_potential = Conv2D(filters=filter_size,
                                 kernel_size=kernel_size,
                                 strides=2,
                                 padding=CRP[2],
                                 kernel_initializer='he_uniform',
                                 use_bias=False)(neuro_potential)
        neuro_potential = ZeroPadding2D(padding=((1, 0), (1,
                                                          0)))(neuro_potential)

    else:
        neuro_potential = None  # unrecognized CRP[0] value; this will raise an error downstream

    neuro_potential = BatchNormalization(momentum=0.95)(neuro_potential)
    neuro_potential = ParametricSwish()(neuro_potential)

    # Output potential to axons
    if CRP[1] == 'MaxPooling':
        neuro_potential = MaxPooling2D()(neuro_potential)

    if d_rate[0] > 0.0:
        neuro_potential = GaussianDropout(rate=d_rate[0])(neuro_potential)
    if d_rate[1] > 0.0:
        neuro_potential = SpatialDropout2D(rate=d_rate[1])(neuro_potential)

    return neuro_potential
Example #26
def __transition_block(prev_ip, cur_ip, new_nb_filter, weight_decay=DEFAULT_WEIGHT_DECAY):
    ''' Apply BatchNorm, Relu, Conv2D to the last two transition inputs and then combine to downsize
    Args:
        prev_ip: keras tensor (e.g. input of current block)
        cur_ip: keras tensor (e.g. output of current block)
        new_nb_filter: new number of filters
        weight_decay: weight decay factor
        
    Returns: keras tensor after applying transition operations on both inputs
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if prev_ip is None or not K.is_keras_tensor(prev_ip):
        prev_ip = cur_ip
    
    # Apply operations to the block input and output to downsize between any arbitrary layers in the model
    x1 = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(prev_ip)
    x1 = Activation('relu')(x1)
    x1 = Conv2D(new_nb_filter, (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=True,
               kernel_regularizer=l2(weight_decay))(x1)    
    x1 = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x1)
    x1 = Activation('relu')(x1)
    x1 = SeparableConv2D(new_nb_filter, (5, 5), kernel_initializer='he_uniform', padding='same', use_bias=True,
               kernel_regularizer=l2(weight_decay))(x1)
    x1 = AveragePooling2D((2, 2), strides=(2, 2))(x1)
    
    
    x2 = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(cur_ip)
    x2 = Activation('relu')(x2)
    x2 = Conv2D(new_nb_filter, (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=True,
               kernel_regularizer=l2(weight_decay))(x2)        
    x2 = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x2)
    x2 = Activation('relu')(x2)                   
    x2 = SeparableConv2D(new_nb_filter, (3, 3), strides=(2, 2), padding='same', use_bias=True)(x2)
    
    # Add them together and output them
    output = add([x1, x2])
    
    
    # Return output layer
    return output
Example #27
def separable_conv2d_batchnorm(input_layer, filters, strides=1):
    output_layer = SeparableConv2D(filters=filters,
                                   kernel_size=3,
                                   strides=strides,
                                   padding='same',
                                   activation='relu')(input_layer)
    output_layer = BatchNormalization()(output_layer)
    return output_layer
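A hedged sketch of how such a helper is typically chained (an editorial addition; the shapes are illustrative):

# Hypothetical encoder stem built from separable_conv2d_batchnorm.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(160, 160, 3))
x = separable_conv2d_batchnorm(inputs, filters=32, strides=2)  # 80x80x32
x = separable_conv2d_batchnorm(x, filters=64, strides=2)       # 40x40x64
Model(inputs, x).summary()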
Example #28
def seperableConv_bottleneck_block_with_se(input, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    ''' Adds a bottleneck block
    Args:
        input: input tensor
        filters: number of output filters
        cardinality: cardinality factor describing the number of
            grouped convolutions
        strides: performs strided convolution for downsampling if > 1
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    init = input

    grouped_channels = int(filters / cardinality)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # If the input channel count does not equal 2 * filters, project this input with a strided 1x1 separable convolution
    if K.image_data_format() == 'channels_first':
        if init._keras_shape[1] != 2 * filters:
            init = SeparableConv2D(filters, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)
    else:
        if init._keras_shape[-1] != 2 * filters:
            init = SeparableConv2D(filters, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)

    x = SeparableConv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = grouped_SeperableConvolution_block(x, grouped_channels, cardinality, strides, weight_decay)

    x = SeparableConv2D(filters, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal', #
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    x = add([init, x])
    x = LeakyReLU()(x)

    return x
Example #29
def EEGNet_Classifier_new(nb_classes,
                          Chans=64,
                          Samples=128,
                          regRate=0.0001,
                          dropoutRate=0.25,
                          kernLength=fs // 2,
                          numFilters=8,
                          numSpatialFliters=1):
    # kernLength is set to half of the sampling frequency

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    layer1 = Conv2D(
        numFilters,
        (1, kernLength),
        padding='same',  # temporal kernel
        kernel_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        input_shape=(1, Chans, Samples),  # channels_first 
        use_bias=False)(input1)  # output_size [F, C, T]
    layer1 = BatchNormalization(axis=1)(
        layer1
    )  # bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
    layer1 = DepthwiseConv2D(
        (Chans, 1),
        padding='valid',  # spatial filters within each feature map
        depth_multiplier=numSpatialFliters,
        depthwise_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        use_bias=False)(layer1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = Activation('elu')(layer1)  # output_size [D*F, 1, T]
    layer1 = AveragePooling2D((1, 4))(layer1)  # output_size [D*F, 1, T//4]
    layer1 = Dropout(dropoutRate)(
        layer1)  # SpatialDropout2D(dropoutRate)(layer1)

    layer2 = SeparableConv2D(
        filters=numFilters * numSpatialFliters,
        padding='same',  # equal to DepthwiseConv2D + 1*1-conv2d
        kernel_size=(1, 16),
        depth_multiplier=1,
        depthwise_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        pointwise_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        use_bias=False)(layer1)
    layer2 = BatchNormalization(axis=1)(layer2)
    layer2 = Activation('elu')(layer2)  # output_size [D*F, 1, T//4]
    layer2 = AveragePooling2D((1, 8))(layer2)
    layer2 = Dropout(dropoutRate)(
        layer2
    )  # SpatialDropout2D(dropoutRate)(layer2)            # output_size [D*F, 1, T//32]

    flatten = Flatten(name='flatten')(layer2)

    dense = Dense(nb_classes,
                  name='dense',
                  kernel_constraint=maxnorm(0.25, axis=0))(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
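This snippet relies on module-level names (fs, MAX_NORM, maxnorm) that are not shown; a hedged sketch of plausible definitions and a call (all values are assumptions, and fs must exist before the function definition because it appears in a default argument):

# Hypothetical globals assumed by EEGNet_Classifier_new, plus an instantiation.
from tensorflow.keras import backend as K
from tensorflow.keras.constraints import max_norm as maxnorm

K.set_image_data_format('channels_first')  # implied by the (1, Chans, Samples) input
fs = 128         # sampling frequency in Hz (assumed)
MAX_NORM = 1.0   # max-norm constraint limit (assumed)

model = EEGNet_Classifier_new(nb_classes=4, Chans=22, Samples=256)
model.summary()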
Example #30
def cnn_model_old_separable():
    model = Sequential()

    model.add(
        SeparableConv2D(32, (3, 3),
                        padding='same',
                        input_shape=(IMG_SIZE, IMG_SIZE, 3),
                        activation='relu'))
    #model.add(layers.BatchNormalization())  # new
    model.add(SeparableConv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))  # previously 0.2

    model.add(SeparableConv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(SeparableConv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))  # previously 0.2

    model.add(SeparableConv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(SeparableConv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(SeparableConv2D(128, (3, 3), padding='same',
                              activation='relu'))  # added
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))  # previously 0.2

    model.add(Flatten())
    #model.add(GlobalAveragePooling2D())
    model.add(Dense(1024, activation='relu'))  # previously 512
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    return model
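A hedged usage sketch; IMG_SIZE and NUM_CLASSES are module-level names in the original project, so the values below are assumptions:

# Hypothetical globals and compilation for cnn_model_old_separable.
IMG_SIZE = 64      # assumed input resolution
NUM_CLASSES = 10   # assumed number of output classes

model = cnn_model_old_separable()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()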