def inverted_res_block(input_tensor, expansion, stride, alpha, filters):
    in_channels = input_tensor.shape.as_list()[-1]
    filters = r(filters * alpha)
    output_tensor = input_tensor
    # Expand
    output_tensor = Conv2D(expansion * in_channels, kernel_size=(1, 1), use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)
    # Depthwise
    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = DepthwiseConv2D(kernel_size=(3, 3), strides=stride, use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)
    # Project (linear bottleneck: no activation after the last BN)
    output_tensor = Conv2D(filters, kernel_size=(1, 1), use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    # Residual connection when shapes match
    if in_channels == filters and stride == 1:
        output_tensor = Add()([input_tensor, output_tensor])
    return output_tensor
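# The block above depends on module-level helpers that are not part of this excerpt
# (`r`, `batch_norm_eps`, `batch_norm_momentum`, `relu_threshold`). A minimal sketch of
# plausible definitions, assuming MobileNet-style conventions; the exact values used by
# the original module are not shown here, so treat these as illustrative assumptions.
batch_norm_eps = 1e-3        # assumed batch-norm epsilon
batch_norm_momentum = 0.999  # assumed batch-norm momentum
relu_threshold = 6.0         # assumed ReLU clipping value (ReLU6-style)


def r(value, divisor=8):
    # Assumed channel-rounding helper: round to the nearest multiple of `divisor`,
    # never dropping below `divisor` itself.
    return max(divisor, int(value + divisor / 2) // divisor * divisor)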
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id,
                        skip_connection, rate=1):
    in_channels = inputs.shape[-1].value  # inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(tf.nn.relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same',
                        dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(tf.nn.relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters, kernel_size=1, padding='same',
               use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #     return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
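# `_make_divisible` is referenced above but not included in this excerpt. A sketch of
# the standard MobileNetV2 utility it is commonly taken from (treat the exact body as
# an assumption if the original project defines it differently):
def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not reduce the channel count by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v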
def separable_conv_with_batch_normalization(x, filters, prefix, stride=1, kernel_size=3,
                                            rate=1, depth_activation=True, epsilon=1e-5):
    if stride == 1:
        depth_padding = 'same'
    else:
        x = effective_padding(x, kernel_size, rate)
        depth_padding = 'valid'
    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride),
                        dilation_rate=(rate, rate), padding=depth_padding,
                        use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_batch_normalization', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    return x
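# `effective_padding` is not defined in this excerpt. A sketch that matches the inline
# padding logic used by the SepConv_BN variants later in this file (the name and
# signature come from the call above; the body is an assumption consistent with them):
def effective_padding(x, kernel_size, rate):
    # Pad so that a strided/atrous depthwise conv with 'valid' padding
    # behaves like 'same' padding for even effective kernel sizes.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    return ZeroPadding2D((pad_beg, pad_end))(x)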
def _bottleneck(self, inputs, filters, kernel, t, s, r=False, act=relu6):
    """Bottleneck
    This function defines a basic bottleneck structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers, specifying the strides of
            the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, whether to use the residual connection.
        act: Activation function applied after each batch normalization.

    # Returns
        Output tensor.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t

    x = self._conv_block(inputs, tchannel, (1, 1), (1, 1), act=act)

    x = DepthwiseConv2D(kernel, strides=(s, s), depth_multiplier=1, padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation(act)(x)

    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)

    if r:
        x = add([x, inputs])
    return x
def conv_effnet(x, kernel, filters, downsample, name, bottleneck=0.5,
                strides=(1, 1), padding='same', bias=False):
    """Pointwise -> spatially separable conv & pooling, EffNet style."""
    assert downsample[0] == downsample[1]
    downsample = downsample[0]
    assert kernel[0] == kernel[1]
    kernel = kernel[0]

    ch_in = int(filters * bottleneck)
    ch_out = filters

    if padding == 'valid':
        pad = ((0, kernel // 2), (0, kernel // 2))
        x = ZeroPadding2D(padding=pad, name=name + 'pad')(x)

    x = Conv2D(ch_in, (1, 1), strides=downsample, padding=padding,
               use_bias=bias, name=name + 'pw')(x)
    x = add_common(x, name=name + 'pw')
    x = DepthwiseConv2D((1, kernel), padding=padding, use_bias=bias,
                        name=name + 'dwv')(x)
    x = add_common(x, name=name + 'dwv')
    x = DepthwiseConv2D((kernel, 1), padding='same', use_bias=bias,
                        name=name + 'dwh')(x)
    x = add_common(x, name=name + 'dwh')
    x = Conv2D(ch_out, (1, 1), padding=padding, use_bias=bias,
               name=name + 'rh')(x)
    return add_common(x, name=name + 'rh')
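# `add_common` is not shown in this excerpt. EffNet-style implementations typically use
# it as a batch-norm + activation helper applied after every conv; a sketch under that
# assumption (the activation choice here is illustrative, not taken from the original):
def add_common(x, name=None):
    x = BatchNormalization(name=None if name is None else name + '_bn')(x)
    return LeakyReLU(name=None if name is None else name + '_act')(x)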
def EEGNet_SSVEP(nb_classes=12, Chans=8, Samples=256,
                 dropoutRate=0.5, kernLength=256, F1=96,
                 D=1, F2=96, dropoutType='Dropout'):
    """ SSVEP Variant of EEGNet, as used in [1].

    Inputs:

      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn.
      D               : number of spatial filters to learn within each temporal
                        convolution.
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.

    [1]. Waytowich, N. et. al. (2018). Compact Convolutional Neural Networks
    for Classification of Asynchronous Steady-State Visual Evoked Potentials.
    Journal of Neural Engineering vol. 15(6).
    http://iopscience.iop.org/article/10.1088/1741-2552/aae5d8
    """

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    block1 = Conv2D(F1, (1, kernLength), padding='same',
                    input_shape=(1, Chans, Samples),
                    use_bias=False)(input1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = DepthwiseConv2D((Chans, 1), use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 4))(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16),
                             use_bias=False, padding='same')(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
def SeparableConv2D_with_batchnorm(filters, kernel_size, name=None):
    return Sequential([
        DepthwiseConv2D(kernel_size=kernel_size, padding='same'),
        BatchNormalizationV2(epsilon=1e-5, momentum=0.999),
        ReLU(max_value=6.),
        Conv2D(filters=filters, kernel_size=1, padding='valid')
    ], name=name)
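# Example use of the block factory above (a sketch; the 32x32x16 input shape is an
# illustrative assumption). The returned Sequential can be called like any Keras layer:
# example_in = Input(shape=(32, 32, 16))
# example_out = SeparableConv2D_with_batchnorm(64, kernel_size=3, name='sep_bn')(example_in)
# example_model = Model(example_in, example_out)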
def aspp(x, input_shape, out_stride):
    """Atrous Spatial Pyramid Pooling (ASPP) head."""
    # 1x1 convolution branch
    b0 = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    b0 = BatchNormalization()(b0)
    b0 = Activation("relu")(b0)

    # Atrous separable branch, rate 6
    b1 = DepthwiseConv2D((3, 3), dilation_rate=(6, 6), padding="same", use_bias=False)(x)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)
    b1 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b1)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)

    # Atrous separable branch, rate 12
    b2 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)
    b2 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b2)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)

    # Atrous separable branch, rate 18 (DeepLabV3+ uses rates 6/12/18 at output stride 16)
    b3 = DepthwiseConv2D((3, 3), dilation_rate=(18, 18), padding="same", use_bias=False)(x)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)
    b3 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b3)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)

    # Image-level pooling branch (disabled):
    # out_shape = int(input_shape[0] / out_stride)
    # b4 = AveragePooling2D(pool_size=(out_shape, out_shape))(x)
    # b4 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b4)
    # b4 = BatchNormalization()(b4)
    # b4 = Activation("relu")(b4)
    # b4 = BilinearUpsampling((out_shape, out_shape))(b4)

    x = Concatenate()([b0, b1, b2, b3])
    return x
def depth_wise_convolution_block(input_tensor, filters, depth_wise_strides, depthwise_padding):
    x = DepthwiseConv2D((3, 3), depth_wise_strides, padding=depthwise_padding, use_bias=False)(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return convolution_block(x, filters)
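# `convolution_block` is defined elsewhere in the original file. A sketch assuming the
# usual pointwise Conv + BN + ReLU pattern used by the surrounding blocks (the signature
# is inferred from the call above; treat the body as an assumption):
def convolution_block(input_tensor, filters, strides=(1, 1)):
    x = Conv2D(filters, (1, 1), strides=strides, padding='same', use_bias=False)(input_tensor)
    x = BatchNormalization()(x)
    return Activation('relu')(x)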
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1,
               depth_activation=False, epsilon=1e-3,
               regularizer_l1=0.0, regularizer_l2=0.0):
    """ SepConv with BN between depthwise & pointwise. Optionally adds activation after BN.
    Implements right "same" padding for even kernel sizes.

    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        epsilon: epsilon to use in BN layer
        regularizer_l1, regularizer_l2: L1/L2 regularization factors for the convolutions
    """
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride),
                        dilation_rate=(rate, rate), padding=depth_padding,
                        use_bias=False, name=prefix + '_depthwise',
                        kernel_regularizer=l1_l2(regularizer_l1, regularizer_l2))(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               name=prefix + '_pointwise',
               kernel_regularizer=l1_l2(regularizer_l1, regularizer_l2))(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
def _deconv_block(x, filters, kernel_size=1):
    x = DepthwiseConv2D(kernel_size=kernel_size, padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters, kernel_size=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    return UpSampling2D()(x)
def mobnet_separable_conv_block(net, num_filters, strides, alpha=1.0):
    net = DepthwiseConv2D(kernel_size=3, padding='same')(net)
    net = BatchNormalization(momentum=0.9997)(net)
    net = Activation('relu')(net)
    net = Conv2D(int(np.floor(num_filters * alpha)), kernel_size=(1, 1),
                 strides=strides, use_bias=False, padding='same')(net)
    net = BatchNormalization(momentum=0.9997)(net)
    net = Activation('relu')(net)
    return net
def MobilenetSeparableConv2D(input, filters, kernel_size,
                             strides=(1, 1), padding='valid', use_bias=True):
    x = DepthwiseConv2D(kernel_size, padding=padding, use_bias=use_bias, strides=strides)(input)
    x = BatchNormalization()(x)
    x = ReLU(6.)(x)
    x = Conv2D(filters, 1, padding='same', use_bias=use_bias, strides=1)(x)
    x = BatchNormalization()(x)
    x = ReLU(6.)(x)
    return x
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    in_channels = backend.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        x = ZeroPadding2D(padding=correct_pad(backend, x, 3),
                          name=prefix + 'pad')(x)
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False,
                        padding='same' if stride == 1 else 'valid',
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters, kernel_size=1, padding='same',
               use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
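# `correct_pad` is imported from keras_applications in the original MobileNetV2 code.
# A sketch of that utility for reference (assumed to match the upstream implementation):
def correct_pad(backend, inputs, kernel_size):
    # Returns a ((top, bottom), (left, right)) padding tuple so that a strided
    # 'valid' convolution reproduces 'same'-style downsampling.
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]))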
def SepConvBlock(input_tensor, filters, strides):
    output_tensor = input_tensor
    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = DepthwiseConv2D(kernel_size=(3, 3), strides=strides)(output_tensor)
    output_tensor = BatchNormalization()(output_tensor)
    output_tensor = Conv2D(kernel_size=(1, 1), filters=filters)(output_tensor)
    output_tensor = BatchNormalization()(output_tensor)
    output_tensor = LeakyReLU(alpha=0.1)(output_tensor)
    return output_tensor
def inverted_res_block(inputs, filters, skip_connection, expansion=3, stride=1, alpha=1, rate=1):
    in_channels = inputs.shape[-1].value
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs

    # Expand
    x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
               use_bias=False, activation=None)(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = Activation(relu6)(x)

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, use_bias=False,
                        padding='same', dilation_rate=rate)(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = Activation(relu6)(x)

    # Project
    x = Conv2D(pointwise_filters, kernel_size=1, padding='same', use_bias=False)(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)

    if skip_connection:
        return Add()([inputs, x])
    return x
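# `relu6` is used as an activation in several blocks above and below but is not defined
# in this excerpt. A sketch consistent with the MobileNet convention (assumes the Keras
# backend is imported as `K`):
def relu6(x):
    return K.relu(x, max_value=6)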
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1,
               depth_activation=False, epsilon=1e-3):
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride),
                        dilation_rate=(rate, rate), padding=depth_padding,
                        use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
def vgg_dep_wise_3x3(input_shape, num_classes):
    inp = Input(input_shape)
    kernel_size = 3
    dilation_rate = 1
    strides = 1

    x = Conv2D(16, (kernel_size, kernel_size), padding='same',
               strides=strides, dilation_rate=dilation_rate)(inp)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConv2D((kernel_size, kernel_size), padding='same',
                        strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(24, (kernel_size, kernel_size), padding='same',
               strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConv2D((kernel_size, kernel_size), padding='same',
                        strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConv2D((kernel_size, kernel_size), padding='same',
                        strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(32, (kernel_size, kernel_size), padding='same',
               strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConv2D((kernel_size, kernel_size), padding='same',
                        strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConv2D((kernel_size, kernel_size), padding='same',
                        strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConv2D((kernel_size, kernel_size), padding='same',
                        strides=strides, dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(48, (kernel_size, kernel_size), padding='same',
               strides=strides, dilation_rate=dilation_rate)(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(num_classes)(x)
    out = Activation('softmax')(x)

    model = tf.keras.models.Model([inp], [out])
    return model
def EEGNet(nb_classes, Chans=64, Samples=128,
           dropoutRate=0.5, kernLength=64, F1=8,
           D=2, F2=16, norm_rate=0.25, dropoutType='Dropout'):
    """ Keras Implementation of EEGNet
    http://iopscience.iop.org/article/10.1088/1741-2552/aace8c/meta

    Note that this implements the newest version of EEGNet and NOT the earlier
    version (version v1 and v2 on arxiv). We strongly recommend using this
    architecture as it performs much better and has nicer properties than
    our earlier version. For example:

        1. Depthwise Convolutions to learn spatial filters within a
        temporal convolution. The use of the depth_multiplier option maps
        exactly to the number of spatial filters learned within a temporal
        filter. This matches the setup of algorithms like FBCSP which learn
        spatial filters within each filter in a filter-bank. This also limits
        the number of free parameters to fit when compared to a fully-connected
        convolution.

        2. Separable Convolutions to learn how to optimally combine spatial
        filters across temporal bands. Separable Convolutions are Depthwise
        Convolutions followed by (1x1) Pointwise Convolutions.

    While the original paper used Dropout, we found that SpatialDropout2D
    sometimes produced slightly better results for classification of ERP
    signals. However, SpatialDropout2D significantly reduced performance
    on the Oscillatory dataset (SMR, BCI-IV Dataset 2A). We recommend using
    the default Dropout in most cases.

    Assumes the input signal is sampled at 128Hz. If you want to use this model
    for any other sampling rate you will need to modify the lengths of temporal
    kernels and average pooling size in blocks 1 and 2 as needed (double the
    kernel lengths for double the sampling rate, etc). Note that we haven't
    tested the model performance with this rule so this may not work well.

    The model with default parameters gives the EEGNet-8,2 model as discussed
    in the paper. This model should do pretty well in general, although it is
    advised to do some model searching to get optimal performance on your
    particular dataset.

    We set F2 = F1 * D (number of input filters = number of output filters) for
    the SeparableConv2D layer. We haven't extensively tested other values of
    this parameter (say, F2 < F1 * D for compressed learning, and F2 > F1 * D
    for overcomplete). We believe the main parameters to focus on are F1 and D.

    Inputs:

      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer. We found
                        that setting this to be half the sampling rate worked
                        well in practice. For the SMR dataset in particular
                        since the data was high-passed at 4Hz we used a kernel
                        length of 32.
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn. Default: F1 = 8, F2 = F1 * D.
      D               : number of spatial filters to learn within each temporal
                        convolution. Default: D = 2
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.
    """

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    block1 = Conv2D(F1, (1, kernLength), padding='same',
                    input_shape=(1, Chans, Samples),
                    use_bias=False)(input1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = DepthwiseConv2D((Chans, 1), use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 4))(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16),
                             use_bias=False, padding='same')(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes, name='dense',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                          depth_multiplier=1, strides=(1, 1), block_id=1):
    """Adds a depthwise convolution block.

    A depthwise convolution block consists of a depthwise conv, batch
    normalization, relu6, pointwise convolution, batch normalization and relu6
    activation.

    Arguments:
        inputs: Input tensor of shape `(rows, cols, channels)` (with
            `channels_last` data format) or (channels, rows, cols) (with
            `channels_first` data format).
        pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number of filters
                in each layer.
            - If `alpha` > 1.0, proportionally increases the number of filters
                in each layer.
            - If `alpha` = 1, default number of filters from the paper are used
                at each layer.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel. The total number of depthwise convolution
            output channels will be equal to `filters_in * depth_multiplier`.
        strides: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        block_id: Integer, a unique identification designating the block number.

    Input shape:
        4D tensor with shape: `(batch, channels, rows, cols)` if
            data_format='channels_first'
        or 4D tensor with shape: `(batch, rows, cols, channels)` if
            data_format='channels_last'.

    Output shape:
        4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
            data_format='channels_first'
        or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
            data_format='channels_last'. `rows` and `cols` values might have
            changed due to stride.

    Returns:
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    x = ZeroPadding2D(padding=(1, 1), name='conv_pad_%d' % block_id)(inputs)
    x = DepthwiseConv2D(  # pylint: disable=not-callable
        (3, 3),
        padding='valid',
        depth_multiplier=depth_multiplier,
        strides=strides,
        use_bias=False,
        name='conv_dw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
def create_pyramid_level(backbone_input,
                         upsamplelike_input=None,
                         addition_input=None,
                         upsample_type='upsamplelike',
                         level=5,
                         ndim=2,
                         lite=False,
                         interpolation='bilinear',
                         feature_size=256):
    """Create a pyramid layer from a particular backbone input layer.

    Args:
        backbone_input (layer): Backbone layer to use to create the pyramid layer
        upsamplelike_input (tensor): Optional input to use
            as a template for shape to upsample to
        addition_input (layer): Optional layer to add to
            pyramid layer after convolution and upsampling.
        upsample_type (str, optional): Choice of upsampling methods
            from ['upsamplelike','upsampling2d','upsampling3d'].
            Defaults to 'upsamplelike'.
        level (int): Level to use in layer names, defaults to 5.
        feature_size (int): Number of filters for the convolutional layer,
            defaults to 256.
        ndim (int): The spatial dimensions of the input data.
            Default is 2, but it also works with 3.
        lite (bool): Whether to use depthwise conv instead of regular conv for
            feature pyramid construction
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ['bilinear', 'nearest']. Defaults to bilinear.

    Returns:
        tuple: Pyramid layer after processing, upsampled pyramid layer

    Raises:
        ValueError: ndim is not 2 or 3
        ValueError: upsample_type not in ['upsamplelike','upsampling2d','upsampling3d']
    """
    # Check input to ndims
    acceptable_ndims = {2, 3}
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    # Check if inputs to ndim and lite are compatible
    if ndim == 3 and lite:
        raise ValueError('lite models are not compatible with 3 dimensional '
                         'networks')

    # Check input to interpolation
    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    # Check input to upsample_type
    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(
                             upsample_type, list(acceptable_upsample)))

    reduced_name = 'C{}_reduced'.format(level)
    upsample_name = 'P{}_upsampled'.format(level)
    addition_name = 'P{}_merged'.format(level)
    final_name = 'P{}'.format(level)

    # Apply 1x1 conv to backbone layer
    if ndim == 2:
        pyramid = Conv2D(feature_size, (1, 1), strides=(1, 1),
                         padding='same', name=reduced_name)(backbone_input)
    else:
        pyramid = Conv3D(feature_size, (1, 1, 1), strides=(1, 1, 1),
                         padding='same', name=reduced_name)(backbone_input)

    # Add and then 3x3 conv
    if addition_input is not None:
        pyramid = Add(name=addition_name)([pyramid, addition_input])

    # Upsample pyramid input
    if upsamplelike_input is not None and upsample_type == 'upsamplelike':
        pyramid_upsample = UpsampleLike(name=upsample_name)(
            [pyramid, upsamplelike_input])
    elif upsample_type == 'upsamplelike':
        pyramid_upsample = None
    else:
        upsampling = UpSampling2D if ndim == 2 else UpSampling3D
        size = (2, 2) if ndim == 2 else (1, 2, 2)
        upsampling_kwargs = {
            'size': size,
            'name': upsample_name,
            'interpolation': interpolation
        }
        if ndim > 2:
            del upsampling_kwargs['interpolation']
        pyramid_upsample = upsampling(**upsampling_kwargs)(pyramid)

    if ndim == 2:
        if lite:
            pyramid_final = DepthwiseConv2D((3, 3), strides=(1, 1),
                                            padding='same',
                                            name=final_name)(pyramid)
        else:
            pyramid_final = Conv2D(feature_size, (3, 3), strides=(1, 1),
                                   padding='same', name=final_name)(pyramid)
    else:
        pyramid_final = Conv3D(feature_size, (1, 3, 3), strides=(1, 1, 1),
                               padding='same', name=final_name)(pyramid)

    return pyramid_final, pyramid_upsample
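# Example of chaining pyramid levels into a top-down feature pyramid (a sketch; C3, C4
# and C5 stand for backbone feature maps and are illustrative placeholders):
# P5, P5_up = create_pyramid_level(C5, upsamplelike_input=C4, level=5)
# P4, P4_up = create_pyramid_level(C4, upsamplelike_input=C3, addition_input=P5_up, level=4)
# P3, _ = create_pyramid_level(C3, addition_input=P4_up, level=3)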
def inverted_residuals(inputs, filters, kernel, stride, expansion=1,
                       alpha=1.0, atrous_rate=1, residual=True, block_id=None):
    """Define Inverted Residuals Architecture.

    inputs --> 1x1 Conv --> Batch Norm --> Relu6 --> 3x3 DepthWise --> Batch Norm
    --> Relu6 --> 1x1 Conv --> Batch Norm --> Relu6 --> Outputs

    Args:
        inputs: tf.float32 4D Tensor
        filters: number of expected output channels
        stride: stride for the depthwise convolution
    """
    scope = 'expanded_conv_' + str(block_id) if block_id else 'expanded_conv'
    with tf.variable_scope(scope):
        # #######################################################
        # Expand and Pointwise
        # #######################################################
        if block_id:
            with tf.variable_scope('expand'):
                in_channels = inputs.get_shape().as_list()[-1]
                x = Conv2D(filters=expansion * in_channels,
                           kernel_size=1,
                           padding='SAME',
                           use_bias=False,
                           activation=None)(inputs)
                x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
                x = Activation(relu6)(x)
        else:
            x = inputs

        # ########################################################
        # Depthwise
        # ########################################################
        with tf.variable_scope('depthwise'):
            x = DepthwiseConv2D(kernel_size=kernel,
                                strides=stride,
                                activation=None,
                                use_bias=False,
                                dilation_rate=(atrous_rate, atrous_rate),
                                padding='SAME')(x)
            x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
            x = Activation(relu6)(x)

        # ########################################################
        # Linear Projection
        # ########################################################
        with tf.variable_scope('project'):
            pointwise_filters = int(filters * alpha)
            pointwise_filters = _make_divisible(pointwise_filters, 8)  # Why 8???
            x = Conv2D(filters=pointwise_filters,
                       kernel_size=1,
                       padding='SAME',
                       use_bias=False,
                       activation=None)(x)
            x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
            x = Activation(relu6)(x)

        if residual:
            x = Add()([inputs, x])

    return x
def FaceMobileNet(input_tensor, alpha=1.0):
    output_tensor = input_tensor

    # Stem: strided 3x3 conv followed by a 3x3 depthwise conv
    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = Conv2D(filters=r(64 * alpha), kernel_size=(3, 3),
                           strides=(2, 2), use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)
    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = DepthwiseConv2D(kernel_size=(3, 3), use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)

    # Inverted residual stages
    output_tensor = inverted_res_block(output_tensor, filters=64, alpha=alpha, stride=2, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=64, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=64, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=64, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=64, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=2, expansion=4)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=2, expansion=4)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)
    output_tensor = inverted_res_block(output_tensor, filters=128, alpha=alpha, stride=1, expansion=2)

    # Head: 1x1 expansion, global depthwise conv, then 1x1 projection to the embedding
    output_tensor = Conv2D(filters=r(512 * alpha), kernel_size=(1, 1), use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)
    output_tensor = DepthwiseConv2D(
        kernel_size=(output_tensor.shape.as_list()[1], output_tensor.shape.as_list()[2]),
        use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = Conv2D(filters=r(128 * alpha), kernel_size=(1, 1), use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)

    return output_tensor
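# Example of wrapping the backbone above in a Model (a sketch; the 112x112x3 input size
# is an assumption commonly used for face-recognition backbones, and Flatten simply
# exposes the final 1x1 feature map as an embedding vector):
# face_input = Input(shape=(112, 112, 3))
# embedding = Flatten()(FaceMobileNet(face_input, alpha=1.0))
# face_model = Model(face_input, embedding)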
def EEGNet_SSVEP(nb_classes, Chans=64, Samples=128, regRate=0.0001,
                 dropoutRate=0.25, kernLength=64, numFilters=8):
    """ Keras Implementation of the variant of EEGNet that was used to classify
    signals from an SSVEP task (https://arxiv.org/abs/1803.04566)

    Inputs:

        nb_classes     : int, number of classes to classify
        Chans, Samples : number of channels and time points in the EEG data
        regRate        : regularization parameter for L1 and L2 penalties
        dropoutRate    : dropout fraction
        kernLength     : length of temporal convolution in first layer
        numFilters     : number of temporal-spatial filter pairs to learn
    """

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    layer1 = Conv2D(numFilters, (1, kernLength), padding='same',
                    kernel_regularizer=l1_l2(l1=0.0, l2=0.0),
                    input_shape=(1, Chans, Samples),
                    use_bias=False)(input1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = DepthwiseConv2D((Chans, 1),
                             depthwise_regularizer=l1_l2(l1=regRate, l2=regRate),
                             use_bias=False)(layer1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = Activation('elu')(layer1)
    layer1 = SpatialDropout2D(dropoutRate)(layer1)

    layer2 = SeparableConv2D(numFilters, (1, 8),
                             depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                             use_bias=False, padding='same')(layer1)
    layer2 = BatchNormalization(axis=1)(layer2)
    layer2 = Activation('elu')(layer2)
    layer2 = AveragePooling2D((1, 4))(layer2)
    layer2 = SpatialDropout2D(dropoutRate)(layer2)

    layer3 = SeparableConv2D(numFilters * 2, (1, 8), depth_multiplier=2,
                             depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                             use_bias=False, padding='same')(layer2)
    layer3 = BatchNormalization(axis=1)(layer3)
    layer3 = Activation('elu')(layer3)
    layer3 = AveragePooling2D((1, 4))(layer3)
    layer3 = SpatialDropout2D(dropoutRate)(layer3)

    flatten = Flatten(name='flatten')(layer3)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)