Example No. 1
def InvertedResidualBlock(x, expand, out_channels, repeats, stride, weight_decay, block_id):
    '''
    This function defines a sequence of 1 or more identical layers, referring to Table 2 in the original paper.
    :param x: Input Keras tensor in (B, H, W, C_in)
    :param expand: expansion factor in the bottleneck residual block
    :param out_channels: number of channels in the output tensor
    :param repeats: number of times to repeat the inverted residual blocks including the one that changes the dimensions.
    :param stride: stride for the 1x1 convolution
    :param weight_decay: hyperparameter for the l2 penalty
    :param block_id: integer id used to build unique layer names
    :return: Output tensor (B, H_new, W_new, out_channels)
    '''
    channel_axis = -1
    in_channels = K.int_shape(x)[channel_axis]
    x = Conv2D(expand * in_channels, 1, padding='same', strides=stride, use_bias=False,
                kernel_regularizer=l2(weight_decay), name='conv_%d_0' % block_id)(x)
    x = BatchNormalization(epsilon=1e-5, momentum=0.9, name='conv_%d_0_bn' % block_id)(x)
    x = Relu6(x, name='conv_%d_0_act_1' % block_id)
    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=1,
                        strides=1,
                        use_bias=False,
                        kernel_regularizer=l2(weight_decay),
                        name='conv_dw_%d_0' % block_id )(x)
    x = BatchNormalization(axis=channel_axis, epsilon=1e-5, momentum=0.9, name='conv_dw_%d_0_bn' % block_id)(x)
    x = Relu6(x, name='conv_%d_0_act_2' % block_id)
    x = Conv2D(out_channels, 1, padding='same', strides=1, use_bias=False,
               kernel_regularizer=l2(weight_decay), name='conv_bottleneck_%d_0' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, epsilon=1e-5, momentum=0.9, name='conv_bottlenet_%d_0_bn' % block_id)(x)

    for i in range(1, repeats):
        x1 = Conv2D(expand*out_channels, 1, padding='same', strides=1, use_bias=False,
                    kernel_regularizer=l2(weight_decay), name='conv_%d_%d' % (block_id, i))(x)
        x1 = BatchNormalization(axis=channel_axis, epsilon=1e-5,momentum=0.9,name='conv_%d_%d_bn' % (block_id, i))(x1)
        x1 = Relu6(x1,name='conv_%d_%d_act_1' % (block_id, i))
        x1 = DepthwiseConv2D((3, 3),
                            padding='same',
                            depth_multiplier=1,
                            strides=1,
                            use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='conv_dw_%d_%d' % (block_id, i))(x1)
        x1 = BatchNormalization(axis=channel_axis, epsilon=1e-5,momentum=0.9, name='conv_dw_%d_%d_bn' % (block_id, i))(x1)
        x1 = Relu6(x1, name='conv_dw_%d_%d_act_2' % (block_id, i))
        x1 = Conv2D(out_channels, 1, padding='same', strides=1, use_bias=False,
                    kernel_regularizer=l2(weight_decay),name='conv_bottleneck_%d_%d' % (block_id, i))(x1)
        x1 = BatchNormalization(axis=channel_axis, epsilon=1e-5, momentum=0.9, name='conv_bottlenet_%d_%d_bn' % (block_id, i))(x1)
        x = add([x, x1], name='block_%d_%d_output' % (block_id, i))
    return x
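The block above depends on a project-specific Relu6 helper and on standard Keras imports that are not shown. A minimal usage sketch under those assumptions (the helper definition and the call below are illustrative, not taken from the original source):

from keras import backend as K
from keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, Activation, add
from keras.models import Model
from keras.regularizers import l2

def Relu6(x, name=None):
    # ReLU capped at 6, wrapped in an Activation layer so it can carry a name
    return Activation(lambda t: K.relu(t, max_value=6.0), name=name)(x)

inputs = Input(shape=(56, 56, 24))
outputs = InvertedResidualBlock(inputs, expand=6, out_channels=32,
                                repeats=3, stride=2, weight_decay=4e-5, block_id=3)
model = Model(inputs, outputs)
model.summary()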
Example No. 2
def _bottleneck(inputs, filters, kernel, t, s, r=False):
    """Bottleneck
    This function defines a basic bottleneck structure.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers,specifying the strides
            of the convolution along the width and height.Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, Whether to use the residuals.
    # Returns
        Output tensor.
    """

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t

    x = _conv_block(inputs, tchannel, (1, 1), (1, 1))

    x = DepthwiseConv2D(kernel, strides=(s, s), depth_multiplier=1, padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation(relu6)(x)

    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)

    if r:
        x = add([x, inputs])
    return x
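This example calls a relu6 activation and a _conv_block helper that are not shown. A plausible sketch of those helpers, inferred from how they are used here (an assumption, not the original definitions):

from keras import backend as K
from keras.layers import Conv2D, BatchNormalization, Activation

def relu6(x):
    # ReLU activation clipped at 6
    return K.relu(x, max_value=6.0)

def _conv_block(inputs, filters, kernel, strides):
    # Conv2D + BatchNorm + relu6, matching how _bottleneck above uses it
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)
    x = BatchNormalization(axis=channel_axis)(x)
    return Activation(relu6)(x)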
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                          depth_multiplier=1, strides=(1, 1), block_id=1,
                          dilation_rate=1):
    """Adds a depthwise convolution block.

    A depthwise convolution block consists of a depthwise conv,
    batch normalization, relu6, pointwise convolution,
    batch normalization and relu6 activation.
    DONE(see--): Allow dilated depthwise convolutions
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id,
                        dilation_rate=dilation_rate)(inputs)
    x = BatchNormalization(
        axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(
        axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
Example No. 4
def separable_conv(x, c_o, kernel,stride, name, relu=True):
    global global_layers
    global_layers.append(name)

    x = DepthwiseConv2D(kernel
        , strides=stride
        , padding='same'
        , use_bias=False
        , depthwise_regularizer=l2(0.00004)
        , name=name+'_depthwise'
    )(x)

    x = Conv2D(c_o,(1,1)
        ,strides=1
        ,use_bias=False
        ,padding='same'
        ,kernel_regularizer=l2(0.004)
        ,name=name+"_pointwise"
    )(x)

    x = BatchNormalization(scale=True, name=name+'_bn')(x,training=False)
    if relu:
        x = Activation('relu', name=name+'_relu')(x)
    
    return x
Example No. 5
def _inverted_res_block(inputs,
                        expansion,
                        stride,
                        alpha,
                        filters,
                        block_id,
                        skip_connection,
                        rate=1):
    in_channels = inputs.shape[-1].value  # inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand

        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
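The _make_divisible helper is referenced but not defined in this snippet; the usual MobileNet rounding helper looks like the sketch below (assumed, not copied from this example's source):

def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never dropping more
    # than 10% below the original value (standard MobileNet helper).
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v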
Example No. 6
def _bottleneck(inputs, filters, kernel, t, s, r=False):
    """
    defines the basic bottleneck structure

    :param inputs: Tensor, input tensor of convolution layer
    :param filters: Integer, the depth of output space
    :param kernel: An integer or tuple/list of 2 integers, kernel's width and height
    :param t: Integer, expansion factor, always apply to the input size
    :param s: An integer or tuple/list of 2 integers, strides along width and height
    :param r: Boolean, whether to use the residuals
    :return: Output tensor
    """

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t

    x = _conv_block(inputs, tchannel, (1, 1), (1, 1))
    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation(relu6)(x)

    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)

    if r:
        x = add([x, inputs])

    return x
Example No. 7
def EEGNet_Classifier_new(nb_classes,
                          Chans=64,
                          Samples=128,
                          regRate=0.0001,
                          dropoutRate=0.25,
                          kernLength=fs // 2,
                          numFilters=8,
                          numSpatialFliters=1):
    # kernLength is set to half of the sampling frequency

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    layer1 = Conv2D(
        numFilters,
        (1, kernLength),
        padding='same',  # temporal kernel
        kernel_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        input_shape=(1, Chans, Samples),  # channels_first 
        use_bias=False)(input1)  # output_size [F, C, T]
    layer1 = BatchNormalization(axis=1)(
        layer1
    )  # bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
    layer1 = DepthwiseConv2D(
        (Chans, 1),
        padding='valid',  # spatial filters within each feature map
        depth_multiplier=numSpatialFliters,
        depthwise_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        use_bias=False)(layer1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = Activation('elu')(layer1)  # output_size [D*F, 1, T]
    layer1 = AveragePooling2D((1, 4))(layer1)  # output_size [D*F, 1, T//4]
    layer1 = Dropout(dropoutRate)(
        layer1)  # SpatialDropout2D(dropoutRate)(layer1)

    layer2 = SeparableConv2D(
        filters=numFilters * numSpatialFliters,
        padding='same',  # equal to DepthwiseConv2D + 1*1-conv2d
        kernel_size=(1, 16),
        depth_multiplier=1,
        depthwise_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        pointwise_constraint=maxnorm(MAX_NORM, axis=[0, 1, 2]),
        use_bias=False)(layer1)
    layer2 = BatchNormalization(axis=1)(layer2)
    layer2 = Activation('elu')(layer2)  # output_size [D*F, 1, T//4]
    layer2 = AveragePooling2D((1, 8))(layer2)
    layer2 = Dropout(dropoutRate)(layer2)  # SpatialDropout2D(dropoutRate)(layer2)    # output_size [D*F, 1, T//32]

    flatten = Flatten(name='flatten')(layer2)

    dense = Dense(nb_classes,
                  name='dense',
                  kernel_constraint=maxnorm(0.25, axis=0))(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
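Note that the snippet relies on module-level constants fs (sampling frequency) and MAX_NORM (max-norm bound) defined elsewhere, and that the (1, Chans, Samples) input shape implies a channels_first data format. A minimal, illustrative usage sketch:

# Illustrative only: fs and MAX_NORM must already be defined in the module
# that contains EEGNet_Classifier_new (e.g. fs = 256, MAX_NORM = 1.0).
model = EEGNet_Classifier_new(nb_classes=4, Chans=64, Samples=512)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()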
Example No. 8
def depth_conv_block(model, d, k, s):
    model.add(
        DepthwiseConv2D((k, k), strides=(s, s), padding='same',
                        use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(d, (1, 1), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    return model
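Since this block appends layers to an existing Sequential model, it has to be preceded by an input-bearing layer. A short, illustrative usage sketch (not from the original source):

from keras.models import Sequential
from keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Activation

model = Sequential()
# Stem convolution provides the input shape before the depthwise blocks
model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same',
                 use_bias=False, input_shape=(224, 224, 3)))
model = depth_conv_block(model, d=64, k=3, s=1)
model = depth_conv_block(model, d=128, k=3, s=2)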
Example No. 9
def EEGNet_SSVEP(nb_classes, Chans = 64, Samples = 128, regRate = 0.0001,
           dropoutRate = 0.25, kernLength = 64, numFilters = 8):
    """ Keras Implementation of the variant of EEGNet that was used to classify
    signals from an SSVEP task (https://arxiv.org/abs/1803.04566)

       
    Inputs:
        
        nb_classes     : int, number of classes to classify
        Chans, Samples : number of channels and time points in the EEG data
        regRate        : regularization parameter for L1 and L2 penalties
        dropoutRate    : dropout fraction
        kernLength     : length of temporal convolution in first layer
        numFilters     : number of temporal-spatial filter pairs to learn
    
    """

    input1   = Input(shape = (1, Chans, Samples))

    ##################################################################
    layer1       = Conv2D(numFilters, (1, kernLength), padding = 'same',
                          kernel_regularizer = l1_l2(l1=0.0, l2=0.0),
                          input_shape = (1, Chans, Samples),
                          use_bias = False)(input1)
    layer1       = BatchNormalization(axis = 1)(layer1)
    layer1       = DepthwiseConv2D((Chans, 1), 
                              depthwise_regularizer = l1_l2(l1=regRate, l2=regRate),
                              use_bias = False)(layer1)
    layer1       = BatchNormalization(axis = 1)(layer1)
    layer1       = Activation('elu')(layer1)
    layer1       = SpatialDropout2D(dropoutRate)(layer1)
    
    layer2       = SeparableConv2D(numFilters, (1, 8), 
                              depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                              use_bias = False, padding = 'same')(layer1)
    layer2       = BatchNormalization(axis=1)(layer2)
    layer2       = Activation('elu')(layer2)
    layer2       = AveragePooling2D((1, 4))(layer2)
    layer2       = SpatialDropout2D(dropoutRate)(layer2)
    
    layer3       = SeparableConv2D(numFilters*2, (1, 8), depth_multiplier = 2,
                              depthwise_regularizer=l1_l2(l1=0.0, l2=regRate), 
                              use_bias = False, padding = 'same')(layer2)
    layer3       = BatchNormalization(axis=1)(layer3)
    layer3       = Activation('elu')(layer3)
    layer3       = AveragePooling2D((1, 4))(layer3)
    layer3       = SpatialDropout2D(dropoutRate)(layer3)
    
    
    flatten      = Flatten(name = 'flatten')(layer3)
    
    dense        = Dense(nb_classes, name = 'dense')(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input1, outputs=softmax)
Example No. 10
def _depthwise_separable_conv_block(x, pointwise_conv_channels, alpha=1.0, depth_multiplier=1, strides=1, block_id=1):
    name = 'conv_block_{}'.format(block_id)
    pointwise_conv_channels = int(alpha*pointwise_conv_channels)

    x = DepthwiseConv2D((3, 3), padding='same', depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name=name+'_dw_conv')(x)
    x = BatchNormalization(name=name+'_dw_bn')(x)
    x = Activation('relu', name=name+'_dw_relu')(x)
    x = Conv2D(pointwise_conv_channels, (1, 1), padding='same', strides=1, use_bias=False, name=name+'_pw_conv')(x)
    x = BatchNormalization(name=name+'_pw_bn')(x)
    x = Activation('relu', name=name+'_pw_relu')(x)
    return x
def bottleneck(x, f, size, stride):
    x = DepthwiseConv2D(
        kernel_size=(size, size),
        strides=(stride, stride),
        padding='same',
        depth_multiplier=1,
        depthwise_regularizer=keras.regularizers.l2(1e-6),
        depthwise_initializer=keras.initializers.glorot_uniform(seed=0))(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation(relu6)(x)
    x = conv(x, filters=f, kernel_size=(3, 3), strides=(1, 1))
    return x
Example No. 12
def layer(inputs, kernel, step):
    x = Conv2D(kernel,
               kernel_size=(1, 1),
               strides=(step, step),
               padding='same')(inputs)
    x = BatchNormalization(axis=1)(x)
    x = Activation(relu6)(x)
    x = DepthwiseConv2D((3, 3), strides=(1, 1), padding='same',
                        use_bias=False)(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation(relu6)(x)
    return x
Example No. 13
def SepConv_BN(x,
               filters,
               prefix,
               stride=1,
               kernel_size=3,
               rate=1,
               depth_activation=False,
               epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
        Implements right "same" padding for even kernel sizes
        Args:
            x: input tensor
            filters: num of filters in pointwise convolution
            prefix: prefix before name
            stride: stride at depthwise conv
            kernel_size: kernel size for depthwise convolution
            rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
            epsilon: epsilon to use in BN layer
    """

    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size),
                        strides=(stride, stride),
                        dilation_rate=(rate, rate),
                        padding=depth_padding,
                        use_bias=False,
                        name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
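To make the explicit-padding branch concrete, here is the arithmetic evaluated for a 3x3 depthwise kernel with atrous rate 2 (a worked example, not part of the original code):

kernel_size, rate = 3, 2
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)  # 3 + 2*1 = 5
pad_total = kernel_size_effective - 1                                 # 4
pad_beg = pad_total // 2                                              # 2
pad_end = pad_total - pad_beg                                         # 2
# ZeroPadding2D((2, 2)) followed by a 'valid' depthwise conv then matches
# the output size that 'same' padding would give at stride 2.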
Example No. 14
def _bottleneck(inputs, filters, kernel, t, s, r=False):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t
    x = _conv_block(inputs, tchannel, (1, 1), (1, 1))
    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation(relu6)(x)
    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    if r:
        x = add([x, inputs])
    return x
Example No. 15
def bottleneck(input, filter, stride, t, con=False):
    # channel = K.int_shape(input)[-1] * t
    x = my_conv(filters=filter, strides=(1, 1))(input)      # set the number of channels
    x = layers.BatchNormalization(axis=-1)(x)
    x = Activation(relu6)(x)

    x = DepthwiseConv2D((3, 3), strides=(stride, stride), depthwise_regularizer=l2(1e-6),
                        padding='same', depth_multiplier=t)(x)
    x = layers.BatchNormalization(axis=-1)(x)
    x = Activation(relu6)(x)

    x = my_conv(filters=filter, strides=(1, 1))(x)
    x = layers.BatchNormalization(axis=-1)(x)
    if con:
        x = layers.add([x, input])
    return x
def hyper_tune_EEGnet(num_classes, chans=22, samples=768, pool_method='AVG'):
    data_shape = (3, chans, samples)
    pooling_options = {'AVG': AveragePooling2D, 'MAX': MaxPooling2D}

    model = Sequential()
    model.add(
        Conv2D(16, (1, 32),
               data_format='channels_first',
               padding='same',
               use_bias=False,
               kernel_initializer=glorot_normal(),
               input_shape=data_shape))

    model.add(BatchNormalization(axis=1, momentum=0.01))

    model.add(
        DepthwiseConv2D((chans, 1),
                        data_format='channels_first',
                        use_bias=False,
                        depth_multiplier=2,
                        depthwise_constraint=max_norm(1.),
                        kernel_initializer=glorot_normal()))

    model.add(BatchNormalization(axis=1, momentum=0.01))
    model.add(ELU())
    model.add(pooling_options[pool_method]((1, 4),
                                           data_format='channels_first'))
    model.add(Dropout(0.5))

    model.add(
        SeparableConv2D(32, (1, 16),
                        kernel_initializer=glorot_normal(),
                        use_bias=False,
                        padding='same',
                        data_format='channels_first'))

    model.add(BatchNormalization(axis=1, momentum=0.01))
    model.add(ELU())
    model.add(pooling_options[pool_method]((1, 8),
                                           data_format='channels_first'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(
        Dense(num_classes,
              activation='softmax',
              kernel_constraint=max_norm(0.25)))
    return model
def origin_EEG_net(num_classes, chans=22, samples=768):
    '''
    original EEGnet
    '''
    data_shape = (3, chans, samples)
    model = Sequential()
    model.add(
        Conv2D(4, (1, 64),
               data_format='channels_first',
               padding='same',
               use_bias=False,
               input_shape=data_shape))

    model.add(BatchNormalization(axis=1))

    model.add(
        DepthwiseConv2D((chans, 1),
                        data_format='channels_first',
                        use_bias=False,
                        depth_multiplier=2,
                        depthwise_constraint=max_norm(1.)))

    model.add(BatchNormalization(axis=1))
    model.add(ELU())
    model.add(AveragePooling2D((1, 4), data_format='channels_first'))
    model.add(Dropout(0.25))

    model.add(
        SeparableConv2D(8, (1, 16),
                        use_bias=False,
                        padding='same',
                        data_format='channels_first'))

    model.add(BatchNormalization(axis=1))
    model.add(ELU())
    model.add(AveragePooling2D((1, 8), data_format='channels_first'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(
        Dense(num_classes,
              activation='softmax',
              kernel_constraint=max_norm(0.25)))
    return model
Example No. 18
 def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
     np.random.seed(1988)
     input_dim = 16
     input_shape = (input_dim, input_dim, 3)
     depth_multiplier = 2
     kernel_height = 3
     kernel_width = 3
     # Define a model
     model = Sequential()
     model.add(
         DepthwiseConv2D(depth_multiplier=depth_multiplier,
                         kernel_size=(kernel_height, kernel_width),
                         input_shape=input_shape,
                         padding='valid',
                         strides=(1, 1)))
     # Set some random weights
     model.set_weights(
         [np.random.rand(*w.shape) for w in model.get_weights()])
     # Test the keras model
     self._test_keras_model(model)
Example No. 19
    def _depthwise_sep_conv(self, x, filters, alpha, strides = (1, 1)):
        '''
        Creates a depthwise separable convolution block

        Args:
            x - input
            filters - the number of output filters
            alpha - width multiplier
            strides - the stride length of the convolution

        Returns:
            A depthwise separable convolution block
        '''
        y = DepthwiseConv2D((3, 3), padding = 'same', strides = strides)(x)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv2D(int(filters * alpha), (1, 1), padding = 'same')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        return y
def _depthwise_conv_block_mod(inputs,
                              pointwise_conv_filters,
                              alpha,
                              depth_multiplier=1,
                              strides=(1, 1),
                              block_id=1,
                              block_name=None):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)
    #
    if block_name is None:
        tname_convd = 'conv_dw_%d' % block_id
        tname_relud = 'conv_dw_%d_relu' % block_id
        tname_bnd = 'conv_dw_%d_bn' % block_id
        tname_convp = 'conv_pw_%d' % block_id
        tname_relup = 'conv_pw_%d_relu' % block_id
        tname_bnp = 'conv_pw_%d_bn' % block_id
    else:
        tname_convd = 'conv_dw_%s' % block_name
        tname_relud = 'conv_dw_%s_relu' % block_name
        tname_bnd = 'conv_dw_%s_bn' % block_name
        tname_convp = 'conv_pw_%s' % block_name
        tname_relup = 'conv_pw_%s_relu' % block_name
        tname_bnp = 'conv_pw_%s_bn' % block_name
    #
    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name=tname_convd)(inputs)
    x = BatchNormalization(axis=channel_axis, name=tname_bnd)(x)
    x = Activation(relu6, name=tname_relud)(x)
    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name=tname_convp)(x)
    x = BatchNormalization(axis=channel_axis, name=tname_bnp)(x)
    return Activation(relu6, name=tname_relup)(x)
def bottleneck(x,
               f,
               stride,
               dm,
               button=False):  # stride=1,True; stride=2,False
    x_short = x
    x = conv(x, filters=f, strides=(1, 1))
    x = DepthwiseConv2D(
        kernel_size=(3, 3),
        strides=(stride, stride),
        padding='same',
        depth_multiplier=dm,
        depthwise_regularizer=keras.regularizers.l2(1e-6),
        depthwise_initializer=keras.initializers.glorot_uniform(seed=0))(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation(relu6)(x)
    x = my_conv(filters=f, strides=(1, 1))(x)
    x = BatchNormalization(axis=-1)(x)
    if button:
        x = keras.layers.Add()([x, x_short])
    x = LeakyReLU(alpha=0.05)(x)
    return x
Example No. 22
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                          depth_multiplier=1, strides=(1, 1), block_id=1):
    """Adds a depthwise convolution block.

    A depthwise convolution block consists of a depthwise conv,
    batch normalization, relu6, pointwise convolution,
    batch normalization and relu6 activation.

    # Arguments
        inputs: Input tensor of shape `(rows, cols, channels)`
            (with `channels_last` data format) or
            (channels, rows, cols) (with `channels_first` data format).
        pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel.
            The total number of depthwise convolution output
            channels will be equal to `filters_in * depth_multiplier`.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        block_id: Integer, a unique identification designating the block number.

    # Input shape
        4D tensor with shape:
        `(batch, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.

    # Returns
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(inputs)
    x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
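For context, consecutive blocks of this kind are typically chained after a strided stem convolution, with the same alpha passed to every block; an illustrative sketch (not part of the original example):

from keras.layers import Input, Conv2D

inputs = Input(shape=(224, 224, 3))
# Stem convolution; BN and relu6 would normally follow as in MobileNet
x = Conv2D(int(32 * 0.75), (3, 3), strides=(2, 2), padding='same',
           use_bias=False, name='conv_stem')(inputs)
x = _depthwise_conv_block(x, 64, alpha=0.75, block_id=1)
x = _depthwise_conv_block(x, 128, alpha=0.75, strides=(2, 2), block_id=2)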
def bottleneck(inputs, filters, kernel, e, s, squeeze, nl):
    """Bottleneck
        This function defines a basic bottleneck structure.

        # Arguments
            inputs: Tensor, input tensor of conv layer.
            filters: Integer, the dimensionality of the output space.
            kernel: An integer or tuple/list of 2 integers, specifying the
                width and height of the 2D convolution window.
            e: Integer, expansion factor.
                e is always applied to the input size.
            s: An integer or tuple/list of 2 integers,specifying the strides
                of the convolution along the width and height.Can be a single
                integer to specify the same value for all spatial dimensions.
            squeeze: Boolean, whether to use the squeeze-and-excitation block.
            nl: String, nonlinearity activation type.
        # Returns
            Output tensor.
        """
    def _relu6(x):
        """Relu 6
            """
        return K.relu(x, max_value=6.0)

    def _hard_swish(x):
        """Hard swish
            """
        return x * K.relu(x + 3.0, max_value=6.0) / 6.0

    def _return_activation(x, nl):
        """Convolution Block
            This function defines a activation choice.

            # Arguments
                x: Tensor, input tensor of conv layer.
                nl: String, nonlinearity activation type.

            # Returns
                Output tensor.
            """
        if nl == 'HS':
            x = Activation(_hard_swish)(x)
        if nl == 'RE':
            x = Activation(_relu6)(x)
        return x

    def _conv_block(inputs, filters, kernel, strides, nl):
        """Convolution Block
            This function defines a 2D convolution operation with BN and activation.

            # Arguments
                inputs: Tensor, input tensor of conv layer.
                filters: Integer, the dimensionality of the output space.
                kernel: An integer or tuple/list of 2 integers, specifying the
                    width and height of the 2D convolution window.
                strides: An integer or tuple/list of 2 integers,
                    specifying the strides of the convolution along the width and height.
                    Can be a single integer to specify the same value for
                    all spatial dimensions.
                nl: String, nonlinearity activation type.
            # Returns
                Output tensor.
            """
        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)
        x = BatchNormalization(axis=channel_axis)(x)
        return _return_activation(x, nl)

    def _squeeze(inputs):
        """Squeeze and Excitation.
            This function defines a squeeze structure.
            # Arguments
                inputs: Tensor, input tensor of conv layer.
            """
        input_channels = int(inputs.shape[-1])

        x = GlobalAveragePooling2D()(inputs)
        x = Dense(input_channels, activation='relu')(x)
        x = Dense(input_channels, activation='hard_sigmoid')(x)
        return x

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    input_shape = K.int_shape(inputs)
    tchannel = input_shape[channel_axis] * e
    x = _conv_block(inputs, tchannel, (1, 1), (1, 1), nl)

    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    if squeeze:
        x = Lambda(lambda x: x * _squeeze(x))(x)
    x = _return_activation(x, nl)
    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    return x
Example No. 24
def MyYOLONet(X, Y):
    kernel_size = (3, 3)  # convolution kernel size
    pool_size = (2, 2)  # size of pooling area for max pooling
    nfilters = 32

    nlayers1 = 4
    nlayers2 = 3
    inputs = Input(shape=X[0].shape)

    x = Conv2D(nfilters,
               kernel_size,
               strides=(1, 1),
               padding='same',
               use_bias=False)(inputs)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    for n in range(nlayers1 - 1):
        x = Conv2D(nfilters,
                   kernel_size,
                   strides=(1, 1),
                   padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=pool_size)(x)

    skip_connection = x

    for n in range(nlayers2):
        x = Conv2D(nfilters * 2, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   use_bias=False)(x)
        x = BatchNormalization(axis=1)(x)
        x = LeakyReLU(alpha=0.1)(x)

    skip_connection = Conv2D(32, (1, 1),
                             strides=(1, 1),
                             padding='same',
                             use_bias=False)(skip_connection)
    skip_connection = BatchNormalization()(skip_connection)
    skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
    skip_connection = DepthwiseConv2D((1, 1))(skip_connection)
    #skip_connection = Lambda(space_to_depth_x2)(skip_connection)
    x = concatenate([skip_connection, x])

    x = Conv2D(nfilters,
               kernel_size,
               strides=(1, 1),
               padding='same',
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(16, (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Flatten()(x)
    predictions = x  # #= Dense(Y[0].size,name='FinalOutput')(x)

    model = Model(inputs=inputs, outputs=predictions)
    return model
Example No. 25
def add_conv_block_bn(prev_layer, weights_file, filters, size, stride, pad, groups, activation, batch_normalize):
    if size % 2 == 0:
        raise ValueError('Filter size must be odd number!')

    if not (groups == 1 or groups == filters):
        raise ValueError('Currently only Conv2D and DepthwiseConv2D are supported!')

    weights_read = 0
    prev_layer_channels = prev_layer._keras_shape[-1]

    use_bias = not batch_normalize
    is_depthwise_conv = groups == filters
    keras_padding = padding_type_from_pad_number(size, pad)

    # TensorFlow weights order: (height, width, in_dim, out_dim)
    weights_shape = (size, size, prev_layer_channels, filters // groups)

    # DarkNet weights are serialized Caffe-style: (out_dim, in_dim, height, width)
    darknet_w_shape = (filters // groups, prev_layer_channels, size, size)
    kernel_weights_count = np.product(weights_shape)

    # Weights in DarkNet are stored as [biases, [bn_scales, bn_mean, bn_variance], kernels]
    # See "save_convolutional_weights" here: https://github.com/pjreddie/darknet/blob/master/src/parser.c
    bias_buffer = weights_file.read(filters * 4)
    conv_bias = np.ndarray(shape=(filters, ), dtype='float32', buffer=bias_buffer)
    weights_read += filters

    if batch_normalize:
        bn_epsilon = 1e-5

        # Note: DarkNet doesn't have "beta" in convolutions (biases are used instead)
        bn_buffer = weights_file.read(filters * 12)
        bn_weights = np.ndarray(shape=(3, filters), dtype='float32', buffer=bn_buffer)
        weights_read += 3 * filters

        gamma, running_mean, running_var = bn_weights[0], bn_weights[1], bn_weights[2]

        batch_norm_weights = [
            gamma, conv_bias, running_mean, running_var
        ]

    kernels_buffer = weights_file.read(kernel_weights_count * 4)
    conv_kernels = np.ndarray(shape=darknet_w_shape, dtype='float32', buffer=kernels_buffer)
    conv_kernels = np.transpose(conv_kernels, [2, 3, 1, 0])

    assert conv_kernels.shape == weights_shape
    weights_read += kernel_weights_count

    layer_weights = [conv_kernels]
    if use_bias:
        layer_weights.append(conv_bias)

    # Create Conv2D or DepthwiseConv2D layer
    layer_params = {
        'kernel_size': (size, size),
        'strides': (stride, stride),
        'use_bias': use_bias,
        'weights': layer_weights,
        'activation': None,
        'padding': keras_padding
    }

    if is_depthwise_conv:
        conv_block = DepthwiseConv2D(**layer_params)(prev_layer)
    else:
        conv_block = Conv2D(filters, **layer_params)(prev_layer)

    # Add BatchNormalization layer if necessary
    if batch_normalize:
        conv_block = BatchNormalization(weights=batch_norm_weights, epsilon=bn_epsilon)(conv_block)

    # Finally, add activation layer
    act_layer = activation_layer_from_name(activation)
    conv_block = act_layer(conv_block)

    return conv_block, weights_read
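The padding_type_from_pad_number and activation_layer_from_name helpers are external to this snippet. A hypothetical sketch of the padding helper, based on DarkNet's convention that pad = size // 2 behaves like Keras 'same' padding (an assumption, not the original implementation):

def padding_type_from_pad_number(size, pad):
    # Hypothetical: DarkNet pads symmetrically by `pad` pixels; size // 2
    # matches Keras 'same' for odd kernels, 0 matches 'valid'.
    if pad == size // 2:
        return 'same'
    if pad == 0:
        return 'valid'
    raise ValueError('Unsupported pad %d for kernel size %d' % (pad, size))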
Example No. 26
 def _conv_block_two(x):
   filters = 16
   x = DepthwiseConv2D(
           (3,3),
           strides=(1,1),
           depth_multiplier=1,
           padding='same',
           kernel_initializer='he_normal',
           use_bias=False,
           activation=None)(x)
   x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
   x = Activation(relu6)(x)
   x = Conv2D(
           filters, (1,1),
           strides=(1,1),
           padding='same',
           kernel_initializer='he_normal',
           use_bias=False,
           activation=None)(x)
   x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
   x = Activation(relu6)(x)
   x = MaxPooling2D(
           (1,3),
           strides=(1,2),
           padding='valid')(x)
   #
   filters = 16
   x = DepthwiseConv2D(
           (3,3),
           strides=(1,1),
           depth_multiplier=1,
           padding='same',
           kernel_initializer='he_normal',
           use_bias=False,
           activation=None)(x)
   x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
   x = Activation(relu6)(x)
   x = Conv2D(
           filters, (1,1),
           strides=(1,1),
           padding='same',
           kernel_initializer='he_normal',
           use_bias=False,
           activation=None)(x)
   x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
   x = Activation(relu6)(x)
   x = MaxPooling2D(
           (1,3),
           strides=(1,2),
           padding='valid')(x)
   #
   #filters = 16
   #x = DepthwiseConv2D(
   #        (3,3),
   #        strides=(1,1),
   #        depth_multiplier=1,
   #        padding='same',
   #        kernel_initializer='he_normal',
   #        use_bias=False,
   #        activation=None)(x)
   #x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
   #x = Activation(relu6)(x)
   #x = Conv2D(
   #        filters, (1,1),
   #        strides=(1,1),
   #        padding='same',
   #        kernel_initializer='he_normal',
   #        use_bias=False,
   #        activation=None)(x)
   #x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
   #x = Activation(relu6)(x)
   #x = MaxPooling2D(
   #        (1,3),
   #        strides=(1,2),
   #        padding='valid')(x)
   return x
Example No. 27
def _inverted_res_block(inputs,
                        expansion,
                        stride,
                        alpha,
                        filters,
                        block_id,
                        skip_connection,
                        rate=1):
    """Inverted Residual Block
    This function defines a sequence of 1 or more identical layers.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers,specifying the strides
            of the convolution along the width and height.Can be a single
            integer to specify the same value for all spatial dimensions.
        n: Integer, layer repeat times.
    # Returns
        Output tensor.
    """

    in_channels = inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    """Convolution Block
    This function defines a 2D convolution operation with BN and relu6.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
        Output tensor.
    """

    if block_id:
        # Expand

        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    """Bottleneck
    This function defines a basic bottleneck structure.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers,specifying the strides
            of the convolution along the width and height.Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, Whether to use the residuals.
    # Returns
        Output tensor.
    """
    # Depthwise /Bottleneck

    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
Example No. 28
def build_mobilenet_v2(input_shape):

    model = Sequential()
    inputs = Input(input_shape)

    #   1st
    model.add(Conv2D(32, (2, 2), strides=(2, 2), input_shape=input_shape))
    # model.add(Conv2D(32, (3, 3), strides=(2, 2))(inputs))
    # model = Conv2D(32, (3, 3), strides=(2, 2))(inputs)
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    #   2nd
    model.add(Conv2D(32, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(16, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))

    #   3rd
    model.add(Conv2D(96, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(2, 2),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(24, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    # repeat 2 times
    model.add(Conv2D(144, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(24, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))

    # 4th
    model.add(Conv2D(144, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(2, 2),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(32, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    # repeat 3 times
    model.add(Conv2D(196, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(32, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    #
    model.add(Conv2D(196, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(32, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))

    # 5th
    model.add(Conv2D(196, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(2, 2),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(64, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    # repeat 4 times
    model.add(Conv2D(384, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(64, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    #
    model.add(Conv2D(384, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(64, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    #
    model.add(Conv2D(384, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(64, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))

    # 6th
    model.add(Conv2D(384, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(96, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    # repeat 3 times
    model.add(Conv2D(576, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(96, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    #
    model.add(Conv2D(576, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(96, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))

    # 7th
    model.add(Conv2D(576, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(2, 2),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(160, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    # repeat 3 times
    model.add(Conv2D(960, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(160, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    #
    model.add(Conv2D(960, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(160, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))

    # 8th
    model.add(Conv2D(960, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(
        DepthwiseConv2D((3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    model.add(Conv2D(320, (1, 1), strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))

    # 9th
    model.add(Conv2D(1280, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation(relu6))

    # 10th
    # use two fully connected layers to add nonlinearity to the classifier head
    model.add(Flatten())
    model.add(Dense(16))
    model.add(Activation(relu6))
    #    model.add(Dense(2), activation='softmax')
    model.add(Dense(2))
    model.add(Activation('softmax'))
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model
Example No. 29
def _shuffle_unit(inputs,
                  in_channels,
                  out_channels,
                  groups,
                  bottleneck_ratio,
                  strides=2,
                  stage=1,
                  block=1):
    """
    creates a shuffleunit

    Parameters
    ----------
    inputs:
        Input tensor with `channels_last` data format
    in_channels:
        number of input channels
    out_channels:
        number of output channels
    strides:
        An integer or tuple/list of 2 integers,
        specifying the strides of the convolution along the width and height.
    groups: int(1)
        number of groups per channel
    bottleneck_ratio: float
        bottleneck ratio implies the ratio of bottleneck channels to output channels.
        For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
        the width of the bottleneck feature map.
    stage: int(1)
        stage number
    block: int(1)
        block number

    Returns
    -------

    """
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    prefix = 'stage%d/block%d' % (stage, block)

    #if strides >= 2:
    #out_channels -= in_channels

    # default: 1/4 of the output channel of a ShuffleNet Unit
    bottleneck_channels = int(out_channels * bottleneck_ratio)
    groups = (1 if stage == 2 and block == 1 else groups)

    x = _group_conv(inputs,
                    in_channels,
                    out_channels=bottleneck_channels,
                    groups=(1 if stage == 2 and block == 1 else groups),
                    name='%s/1x1_gconv_1' % prefix)
    x = BatchNormalization(axis=bn_axis, name='%s/bn_gconv_1' % prefix)(x)
    x = Activation('relu', name='%s/relu_gconv_1' % prefix)(x)

    x = Lambda(channel_shuffle,
               arguments={'groups': groups},
               name='%s/channel_shuffle' % prefix)(x)
    x = DepthwiseConv2D(kernel_size=(3, 3),
                        padding="same",
                        use_bias=False,
                        strides=strides,
                        name='%s/1x1_dwconv_1' % prefix)(x)
    x = BatchNormalization(axis=bn_axis, name='%s/bn_dwconv_1' % prefix)(x)

    x = _group_conv(
        x,
        bottleneck_channels,
        out_channels=out_channels if strides == 1 else out_channels -
        in_channels,
        groups=groups,
        name='%s/1x1_gconv_2' % prefix)
    x = BatchNormalization(axis=bn_axis, name='%s/bn_gconv_2' % prefix)(x)

    if strides < 2:
        ret = Add(name='%s/add' % prefix)([x, inputs])
    else:
        avg = AveragePooling2D(pool_size=3,
                               strides=2,
                               padding='same',
                               name='%s/avg_pool' % prefix)(inputs)
        ret = Concatenate(bn_axis, name='%s/concat' % prefix)([x, avg])

    ret = Activation('relu', name='%s/relu_out' % prefix)(ret)

    return ret
Example No. 30
def EEGNet_org(nb_classes,
               Chans=64,
               Samples=128,
               regRate=0.0001,
               dropoutRate=0.25,
               kernLength=64,
               numFilters=8):
    """ Keras Implementation of EEGNet (https://arxiv.org/abs/1611.08024v3)

    Requires Tensorflow >= 1.5 and Keras >= 2.1.3

    Note that this implements the newest version of EEGNet and NOT the earlier
    version (version v1 and v2 on arxiv). We strongly recommend using this
    architecture as it performs much better and has nicer properties than
    our earlier version.

    Note that we use 'image_data_format' = 'channels_first' in the keras.json
    configuration file.

    Inputs:

        nb_classes: int, number of classes to classify
        Chans, Samples: number of channels and time points in the EEG data
        regRate: regularization parameter for L1 and L2 penalties
        dropoutRate: dropout fraction
        kernLength: length of temporal convolution in first layer
        numFilters: number of temporal-spatial filter pairs to learn

    Depending on the task, using numFilters = 4 or 8 seemed to do pretty well
    across tasks.

    """

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    layer1 = Conv2D(numFilters, (1, kernLength),
                    padding='same',
                    kernel_regularizer=l1_l2(l1=0.0, l2=0.0),
                    input_shape=(1, Chans, Samples),
                    use_bias=False)(input1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = DepthwiseConv2D((Chans, 1),
                             depthwise_regularizer=l1_l2(l1=regRate,
                                                         l2=regRate),
                             use_bias=False)(layer1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = Activation('elu')(layer1)
    layer1 = SpatialDropout2D(dropoutRate)(layer1)

    layer2 = SeparableConv2D(numFilters, (1, 8),
                             depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                             use_bias=False,
                             padding='same')(layer1)
    layer2 = BatchNormalization(axis=1)(layer2)
    layer2 = Activation('elu')(layer2)
    layer2 = AveragePooling2D((1, 4))(layer2)
    layer2 = SpatialDropout2D(dropoutRate)(layer2)

    layer3 = SeparableConv2D(numFilters * 2, (1, 8),
                             depth_multiplier=2,
                             depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                             use_bias=False,
                             padding='same')(layer2)
    layer3 = BatchNormalization(axis=1)(layer3)
    layer3 = Activation('elu')(layer3)
    layer3 = AveragePooling2D((1, 4))(layer3)
    layer3 = SpatialDropout2D(dropoutRate)(layer3)

    flatten = Flatten(name='flatten')(layer3)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
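Because the input shape is (1, Chans, Samples), the model expects channels_first data; besides editing keras.json as noted in the docstring, the format can be set programmatically. An illustrative usage sketch (not from the original source):

from keras import backend as K
K.set_image_data_format('channels_first')

model = EEGNet_org(nb_classes=2, Chans=64, Samples=128)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()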