Example #1
def middle_flow(inputs, filters, block, use_bias=False, bn_axis=-1):

    sparable_name = "conv" + str(block) + "_separable"
    residual = inputs

    x = Activation('relu')(inputs)
    x = SeparableConv2D(filters, (3, 3),
                        padding='same',
                        use_bias=use_bias,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(l2_norm),
                        name=sparable_name + "1")(x)
    x = BatchNormalization(axis=bn_axis, name=sparable_name + "1_bn")(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters, (3, 3),
                        padding='same',
                        use_bias=use_bias,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(l2_norm),
                        name=sparable_name + "2")(x)
    x = BatchNormalization(axis=bn_axis, name=sparable_name + "2_bn")(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters, (3, 3),
                        padding='same',
                        use_bias=use_bias,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(l2_norm),
                        name=sparable_name + "3")(x)
    x = BatchNormalization(axis=bn_axis, name=sparable_name + "2_bn")(x)

    x = add([x, residual])

    return x
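A minimal driver for this block (a sketch, not from the source: it assumes tensorflow.keras imports and the module-level l2_norm constant that the function references but the excerpt never defines):

# Hedged driver for middle_flow. l2_norm is assumed here; the original
# module defines its own value. The residual add requires `filters` to
# match the input channel count (728 in Xception's middle flow).
from tensorflow.keras.layers import (Input, Activation, SeparableConv2D,
                                     BatchNormalization, add)
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

l2_norm = 1e-4  # assumed weight-decay coefficient

feats = Input(shape=(19, 19, 728))
out = middle_flow(feats, filters=728, block=5)
Model(feats, out).summary()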
Example #2
def xception_block(input_layer,
                   filter,
                   last_stride,
                   last_rate,
                   name,
                   residual_type='conv',
                   return_skip=False):
    if type(filter) is int:
        filters = [filter, filter, filter]
    else:
        filters = filter

    x = input_layer

    x = SeparableConv2D(filters[0],
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        name=name + '_sepconv_1')(x)
    x = BatchNormalization(name=name + '_sepconv_1_bn')(x)
    x = ReLU()(x)

    x = SeparableConv2D(filters[1],
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        name=name + '_sepconv_2')(x)
    x = BatchNormalization(name=name + '_sepconv_2_bn')(x)
    x = ReLU()(x)
    skip = x

    x = SeparableConv2D(filters[2],
                        kernel_size=3,
                        strides=last_stride,
                        dilation_rate=last_rate,
                        padding='same',
                        use_bias=False,
                        name=name + '_atrous_sepconv')(x)
    x = BatchNormalization(name=name + '_atrous_sepconv_bn')(x)
    x = ReLU()(x)

    if residual_type == 'conv':
        res = Conv2D(filters=filters[2],
                     kernel_size=1,
                     strides=last_stride,
                     padding='same',
                     use_bias=False,
                     name=name + "_residual")(input_layer)
        res = BatchNormalization(name=name + "_residual_bn")(res)
        # res = ReLU()(res)

        x = add([x, res])

    elif residual_type == 'add':
        x = add([x, input_layer])

    if return_skip:
        return x, skip

    return x
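A hedged usage sketch, showing both the plain call and the return_skip form used for decoder skip connections (shapes are illustrative assumptions):

# Sketch only: assumes the Keras imports used by xception_block are in scope.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input(shape=(128, 128, 64))
x = xception_block(inp, filter=128, last_stride=2, last_rate=1,
                   name='entry_block_1', residual_type='conv')
x, skip = xception_block(x, filter=256, last_stride=2, last_rate=1,
                         name='entry_block_2', residual_type='conv',
                         return_skip=True)
Model(inp, [x, skip]).summary()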
Example #3
    def __init__(self,
                 channels,
                 name=None,
                 activation=None,
                 random=False,
                 input_shape=None,
                 strides=(1, 1),
                 kernel_size=(3, 3),
                 N=None,
                 rand_args=None):
        """
        Keras layer that encapsulates the ReLU-Conv-BN triplet.
        The Conv component can either be a single separable
        convolution or a RandLayer, depending on the 'random'
        parameter.

        Arguments:
            channels: number of filters.
            name: layer's name. Useful for the summary.
            activation: activation function. Usually ReLU.
            random: bool defining what type of layer this is.
            input_shape: shape of the input, e.g. (None, height, width, channels).
                Necessary only for the first layer of the network.
            strides: stride of the convolution. Only used if random=False.
            kernel_size: size of the filter.
            N: number of nodes in the random layer.
            rand_args: hyperparameters of the random layer.
        """

        super(Triplet, self).__init__(name=name)

        if activation is None or activation == 'linear':
            self.activation = Activation('linear')
        elif activation == 'relu':
            self.activation = Activation('relu')

        if random:
            assert rand_args is not None
            self.conv = RandLayer(channels, rand_args, activation, N)
        else:
            if input_shape is None:
                self.conv = SeparableConv2D(
                    filters=channels,
                    kernel_size=kernel_size,
                    kernel_regularizer=l2(WEIGHT_DECAY),
                    strides=strides,
                    padding='same')
            else:  # Only in the first layer
                self.conv = SeparableConv2D(
                    filters=channels,
                    kernel_size=kernel_size,
                    kernel_regularizer=l2(WEIGHT_DECAY),
                    strides=strides,
                    padding='same',
                    input_shape=input_shape)

        self.bn = BatchNormalization()
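Only the constructor is shown in this excerpt; a plausible forward pass (an assumption, not the author's code) simply chains the three components named in the docstring:

    # Hypothetical call() for Triplet -- not part of the original excerpt;
    # order follows the docstring: ReLU -> Conv -> BN.
    def call(self, inputs, training=False):
        x = self.activation(inputs)
        x = self.conv(x)
        return self.bn(x, training=training)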
Example #4
def _separable_conv_block(ip,
                          filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          block_id=None):
  """Adds 2 blocks of [relu-separable conv-batchnorm].

  Arguments:
      ip: Input tensor
      filters: Number of output filters per layer
      kernel_size: Kernel size of separable convolutions
      strides: Strided convolution for downsampling
      block_id: String block_id

  Returns:
      A Keras tensor
  """
  channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

  with K.name_scope('separable_conv_block_%s' % block_id):
    x = Activation('relu')(ip)
    x = SeparableConv2D(
        filters,
        kernel_size,
        strides=strides,
        name='separable_conv_1_%s' % block_id,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal')(
            x)
    x = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_1_bn_%s' % (block_id))(
            x)
    x = Activation('relu')(x)
    x = SeparableConv2D(
        filters,
        kernel_size,
        name='separable_conv_2_%s' % block_id,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal')(
            x)
    x = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_2_bn_%s' % (block_id))(
            x)
  return x
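A hedged driver (K here is tensorflow.keras.backend, as in the NASNet-style source this block comes from; shapes are illustrative):

# Sketch: stride (2, 2) makes this a reduction block.
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Input, Activation, SeparableConv2D,
                                     BatchNormalization)
from tensorflow.keras.models import Model

ip = Input(shape=(32, 32, 44))
x = _separable_conv_block(ip, filters=44, strides=(2, 2), block_id='reduce_1')
Model(ip, x).summary()  # (None, 16, 16, 44)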
Example #5
def build_model():
    input_img = Input(shape=(224, 224, 3), name='ImageInput')
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='Conv1_1')(input_img)
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='Conv1_2')(x)
    x = MaxPooling2D((2, 2), name='pool1')(x)

    x = SeparableConv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv2_1')(x)
    x = SeparableConv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv2_2')(x)
    x = MaxPooling2D((2, 2), name='pool2')(x)

    x = SeparableConv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv3_1')(x)
    x = BatchNormalization(name='bn1')(x)
    x = SeparableConv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv3_2')(x)
    x = BatchNormalization(name='bn2')(x)
    x = SeparableConv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv3_3')(x)
    x = MaxPooling2D((2, 2), name='pool3')(x)

    x = SeparableConv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv4_1')(x)
    x = BatchNormalization(name='bn3')(x)
    x = SeparableConv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv4_2')(x)
    x = BatchNormalization(name='bn4')(x)
    x = SeparableConv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv4_3')(x)
    x = MaxPooling2D((2, 2), name='pool4')(x)

    x = Flatten(name='flatten')(x)
    x = Dense(1024, activation='relu', name='fc1')(x)
    x = Dropout(0.7, name='dropout1')(x)
    x = Dense(512, activation='relu', name='fc2')(x)
    x = Dropout(0.5, name='dropout2')(x)
    x = Dense(2, activation='softmax', name='fc3')(x)

    model = Model(inputs=input_img, outputs=x)
    return model
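A hedged training setup for this model; the two-unit softmax head implies one-hot labels and categorical cross-entropy:

model = build_model()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()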
Example #6
def _text_recognition_vertical_model(input_shape, n_vocab):
    roi = Input(shape=input_shape, name="roi_vertical")
    x = roi
    for c in [64, 128, 256]:
        x = SeparableConv2D(c, 3, padding="same")(x)
        # TODO(agatan): if input_shape contains 0, GroupNormalization can generate nan weights.
        # x = GroupNormalization()(x)
        x = ReLU(6.)(x)
        x = SeparableConv2D(c, 3, padding="same")(x)
        # x = GroupNormalization()(x)
        x = ReLU(6.)(x)
        x = MaxPooling2D((1, 2))(x)
    x = Lambda(lambda v: tf.squeeze(v, 2))(x)
    x = Dropout(0.2)(x)
    output = Dense(n_vocab, activation="softmax")(x)
    return Model(roi, output, name="vertical_model")
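A hedged instantiation; the width must collapse to 1 after the three (1, 2) poolings for the squeeze to succeed, so 8 is a natural choice here (my assumption, not stated in the excerpt):

# With width 8, the three MaxPooling2D((1, 2)) stages reduce the second
# spatial axis to 1, which the tf.squeeze(v, 2) Lambda requires.
model = _text_recognition_vertical_model(input_shape=(48, 8, 3), n_vocab=100)
model.summary()  # per-row softmax over the vocabulary: (None, 48, 100)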
Example #7
    def call(self, x, **kwargs):
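        # Note: every layer here is constructed inside call(), so a fresh
        # set of weights is created on each invocation; normally these
        # layers would be built once in __init__ and reused here.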
        x = BatchNormalization()(x)
        x = Conv2D(filters=self.depth,
                   kernel_size=1,
                   use_bias=False,
                   data_format='channels_last',
                   padding='same')(x)
        x = BatchNormalization()(x)
        x = SwishLayer()(x)
        x = SeparableConv2D(filters=self.depth,
                            kernel_size=5,
                            use_bias=False,
                            data_format='channels_last',
                            padding='same')(x)
        x = BatchNormalization()(x)
        x = SwishLayer()(x)
        x = Conv2D(filters=self.depth,
                   kernel_size=1,
                   use_bias=False,
                   data_format='channels_last',
                   padding='same')(x)
        x = BatchNormalization()(x)
        x = SELayer(depth=self.depth)(x)

        return x
Example #8
    def conv_bottleneck_ds(x,
                           kernel,
                           filters,
                           downsample,
                           name,
                           padding='same',
                           bottleneck=0.5):
        """
            Bottleneck -> Depthwise Separable
            (Pointwise->Depthwise->Pointswise)
            MobileNetV2 style
            """
        if padding == 'valid':
            pad = ((0, kernel[0] // 2), (0, kernel[0] // 2))
            x = ZeroPadding2D(padding=pad, name=name + 'pad')(x)

        x = Conv2D(int(filters * bottleneck), (1, 1),
                   padding='same',
                   strides=downsample,
                   name=name + '_pw')(x)
        x = add_common(x, name + '_pw')

        x = SeparableConv2D(filters,
                            kernel,
                            padding=padding,
                            strides=(1, 1),
                            name=name + '_ds')(x)
        return add_common(x, name + '_ds')
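Both this block and conv_ds in Example #11 end with an add_common helper that the excerpts never define; a plausible stand-in (an assumption on my part) is the conventional BatchNorm + ReLU tail:

# Hypothetical add_common -- not shown in the source. MobileNet-style
# blocks conventionally finish with BatchNorm followed by ReLU.
from tensorflow.keras.layers import Activation, BatchNormalization

def add_common(x, name):
    x = BatchNormalization(name=name + '_bn')(x)
    return Activation('relu', name=name + '_relu')(x)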
Example #9
def EEGNet_SSVEP(nb_classes = 12, Chans = 8, Samples = 256, 
             dropoutRate = 0.5, kernLength = 256, F1 = 96, 
             D = 1, F2 = 96, dropoutType = 'Dropout'):
    """ SSVEP Variant of EEGNet, as used in [1]. 
    Inputs:
        
      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn. 
      D               : number of spatial filters to learn within each temporal
                        convolution.
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.
      
      
    [1]. Waytowich, N. et al. (2018). Compact Convolutional Neural Networks
    for Classification of Asynchronous Steady-State Visual Evoked Potentials.
    Journal of Neural Engineering vol. 15(6). 
    http://iopscience.iop.org/article/10.1088/1741-2552/aae5d8
    """
    
    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    
    input1   = Input(shape = (1, Chans, Samples))

    ##################################################################
    block1       = Conv2D(F1, (1, kernLength), padding = 'same',
                                   input_shape = (1, Chans, Samples),
                                   use_bias = False)(input1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = DepthwiseConv2D((Chans, 1), use_bias = False, 
                                   depth_multiplier = D,
                                   depthwise_constraint = max_norm(1.))(block1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = Activation('elu')(block1)
    block1       = AveragePooling2D((1, 4))(block1)
    block1       = dropoutType(dropoutRate)(block1)
    
    block2       = SeparableConv2D(F2, (1, 16),
                                   use_bias = False, padding = 'same')(block1)
    block2       = BatchNormalization(axis = 1)(block2)
    block2       = Activation('elu')(block2)
    block2       = AveragePooling2D((1, 8))(block2)
    block2       = dropoutType(dropoutRate)(block2)
        
    flatten      = Flatten(name = 'flatten')(block2)
    
    dense        = Dense(nb_classes, name = 'dense')(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input1, outputs=softmax)
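A hedged usage sketch; the original EEGNet code feeds channels-first style (1, Chans, Samples) inputs, so the data format of your Keras config matters:

model = EEGNet_SSVEP(nb_classes=12, Chans=8, Samples=256)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()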
Example #10
    def __init__(self, filters, kernel, strides):
        '''
        Constructs a Separable Convolution - Batch Normalization - ReLU block.
        '''
        super(SeperableConvolution, self).__init__()

        self.conv = SeparableConv2D(filters, kernel, strides=strides, padding='same',
                                    kernel_initializer='he_uniform')
        self.bn = BatchNormalization()
Example #11
def conv_ds(x, kernel, filters, downsample, name, padding='same'):
    """Depthwise Separable convolutional block
    (Depthwise -> Pointwise), MobileNet style."""
    x = SeparableConv2D(filters,
                        kernel,
                        padding=padding,
                        strides=downsample,
                        name=name + '_ds')(x)
    return add_common(x, name=name + '_ds')
Example #12
def residual_separable(inputs, filters, block, use_bias=False, bn_axis=-1):

    residual_name = "conv" + str(block) + "_residual"
    sparable_name = "conv" + str(block) + "_separable"

    residual = Conv2D(filters[0], (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=use_bias,
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_norm),
                      name=residual_name)(inputs)
    residual = BatchNormalization(axis=bn_axis,
                                  name=residual_name + "_bn")(residual)

    if block != 2:
        inputs = Activation('relu')(inputs)
    x = SeparableConv2D(filters[1], (3, 3),
                        padding='same',
                        use_bias=use_bias,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(l2_norm),
                        name=sparable_name + "1")(inputs)
    x = BatchNormalization(axis=bn_axis, name=sparable_name + "1_bn")(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters[2], (3, 3),
                        padding='same',
                        use_bias=use_bias,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(l2_norm),
                        name=sparable_name + "2")(x)
    x = BatchNormalization(axis=bn_axis, name=sparable_name + "2_bn")(x)

    # Pool
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = add([x, residual])

    return x
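A hedged driver (l2_norm again assumed to be a module-level constant): the block halves spatial resolution via the stride-2 residual and the stride-2 max pool, and changes depth to filters[0]:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

l2_norm = 1e-4  # assumed value
inp = Input(shape=(149, 149, 64))
out = residual_separable(inp, [128, 128, 128], block=2)
Model(inp, out).summary()  # (None, 75, 75, 128)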
Example #13
    def conv_sep(self, inp, filters, kernel_size=5, strides=2, **kwargs):
        """ Separable Convolution Layer """
        logger.debug(
            "inp: %s, filters: %s, kernel_size: %s, strides: %s, kwargs: %s",
            inp, filters, kernel_size, strides, kwargs)
        name = self.get_name("separableconv2d_{}".format(inp.shape[1]))
        kwargs = self.set_default_initializer(kwargs)
        var_x = SeparableConv2D(filters,
                                kernel_size=kernel_size,
                                strides=strides,
                                padding="same",
                                name="{}_seperableconv2d".format(name),
                                **kwargs)(inp)
        var_x = Activation("relu", name="{}_relu".format(name))(var_x)
        return var_x
Example #14
def make_model():
    tf.keras.backend.clear_session()
    loaded = Sequential()
    # First Block
    loaded.add(
        Conv2D(16,
               activation='relu',
               kernel_size=(3, 3),
               padding='same',
               input_shape=(128, 128, 3)))
    loaded.add(
        Conv2D(16, activation='relu', kernel_size=(3, 3), padding='same'))
    loaded.add(MaxPool2D(pool_size=(3, 3)))

    # Second Block
    loaded.add(SeparableConv2D(32, kernel_size=(3, 3), padding='same'))
    loaded.add(SeparableConv2D(32, kernel_size=(3, 3), padding='same'))
    loaded.add(SeparableConv2D(32, kernel_size=(3, 3), padding='same'))
    loaded.add(BatchNormalization())
    loaded.add(Activation('relu'))
    loaded.add(MaxPool2D(pool_size=(3, 3)))

    # Third Block
    loaded.add(SeparableConv2D(64, kernel_size=(3, 3), padding='same'))
    loaded.add(SeparableConv2D(64, kernel_size=(3, 3), padding='same'))
    loaded.add(BatchNormalization())
    loaded.add(Activation('relu'))
    loaded.add(MaxPool2D(pool_size=(3, 3)))

    # Forth Block
    loaded.add(SeparableConv2D(128, kernel_size=(3, 3), padding='same'))
    loaded.add(SeparableConv2D(128, kernel_size=(3, 3), padding='same'))
    loaded.add(BatchNormalization())
    loaded.add(Activation('relu'))
    loaded.add(MaxPool2D(pool_size=(3, 3)))
    loaded.add(Dropout(0.25))

    # Fully Connected Layer
    loaded.add(Flatten())
    loaded.add(Dense(units=128, activation='relu'))

    # Output layer
    loaded.add(Dense(1, activation='sigmoid'))
    loaded.load_weights('./flaskblog/BESTWeights.hdf5')
    return loaded
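A hedged inference sketch; it assumes the checkpoint path referenced above exists:

import numpy as np

model = make_model()
prob = model.predict(np.zeros((1, 128, 128, 3), dtype='float32'))
print(prob.shape)  # (1, 1): sigmoid probability of the positive class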
Example #15
    def __init__(self, n_classes):
        """
        Last layer of the model outputting the probabilities
        for each class.
        Performs a SeparableConv2D 1x1, BN, GlobalAveragePooling2D,
        FC, Dropout, Softmax.

        Arguments:
            n_classes: output size.
        """
        super(Classifier, self).__init__(name='classifier')

        self.conv = SeparableConv2D(filters=1280,
                                    kernel_size=(1, 1),
                                    kernel_regularizer=l2(WEIGHT_DECAY),
                                    activation='relu')
        self.bn = BatchNormalization()
        self.avg_pool = GlobalAveragePooling2D()
        self.fc = Dense(units=n_classes)
        self.droput = Dropout(0.2)
        self.softmax = Softmax()
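As with Example #3, only the constructor appears here; a hypothetical forward pass following the docstring's order (conv, BN, global pooling, FC, dropout, softmax) might be:

    # Hypothetical call() -- not in the original excerpt; note the attribute
    # is spelled 'droput' in the constructor above.
    def call(self, inputs, training=False):
        x = self.conv(inputs)
        x = self.bn(x, training=training)
        x = self.avg_pool(x)
        x = self.fc(x)
        x = self.droput(x, training=training)
        return self.softmax(x)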
Example #16
def build(width, height, depth, classes):
    model = Sequential()
    shape = (height, width, depth)
    channel_dim = -1

    if image_data_format() == "channels_first":
        shape = (depth, height, width)
        channel_dim = 1

    model.add(SeparableConv2D(32, (3, 3), padding="same", input_shape=shape))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=channel_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(SeparableConv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=channel_dim))
    model.add(SeparableConv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=channel_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(SeparableConv2D(128, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=channel_dim))
    model.add(SeparableConv2D(128, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=channel_dim))
    model.add(SeparableConv2D(128, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=channel_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(classes))
    model.add(Activation("softmax"))

    return model
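A hedged usage sketch for this builder:

model = build(width=64, height=64, depth=3, classes=2)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()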
Example #17
    def build(input_shape=None, classes=3):

        img_input = Input(shape=input_shape)

        channel_axis = 1 if image_data_format() == 'channels_first' else -1

        x = Conv2D(32, (3, 3),
                   strides=(2, 2),
                   use_bias=False,
                   name='block1_conv1')(img_input)
        x = BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
        x = Activation('relu', name='block1_conv1_act')(x)
        x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
        x = BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
        x = Activation('relu', name='block1_conv2_act')(x)

        residual = Conv2D(128, (1, 1),
                          strides=(2, 2),
                          padding='same',
                          use_bias=False)(x)
        residual = BatchNormalization(axis=channel_axis)(residual)

        x = SeparableConv2D(128, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block2_sepconv1')(x)
        x = BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x)
        x = Activation('relu', name='block2_sepconv2_act')(x)
        x = SeparableConv2D(128, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block2_sepconv2')(x)
        x = BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x)

        x = MaxPooling2D((3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='block2_pool')(x)
        x = add([x, residual])

        residual = Conv2D(256, (1, 1),
                          strides=(2, 2),
                          padding='same',
                          use_bias=False)(x)
        residual = BatchNormalization(axis=channel_axis)(residual)

        x = Activation('relu', name='block3_sepconv1_act')(x)
        x = SeparableConv2D(256, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block3_sepconv1')(x)
        x = BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x)
        x = Activation('relu', name='block3_sepconv2_act')(x)
        x = SeparableConv2D(256, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block3_sepconv2')(x)
        x = BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x)

        x = MaxPooling2D((3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='block3_pool')(x)
        x = add([x, residual])

        residual = Conv2D(728, (1, 1),
                          strides=(2, 2),
                          padding='same',
                          use_bias=False)(x)
        residual = BatchNormalization(axis=channel_axis)(residual)

        x = Activation('relu', name='block4_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block4_sepconv1')(x)
        x = BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x)
        x = Activation('relu', name='block4_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block4_sepconv2')(x)
        x = BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x)

        x = MaxPooling2D((3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='block4_pool')(x)
        x = add([x, residual])

        for i in range(8):
            residual = x
            prefix = 'block' + str(i + 5)

            x = Activation('relu', name=prefix + '_sepconv1_act')(x)
            x = SeparableConv2D(728, (3, 3),
                                padding='same',
                                use_bias=False,
                                name=prefix + '_sepconv1')(x)
            x = BatchNormalization(axis=channel_axis,
                                   name=prefix + '_sepconv1_bn')(x)
            x = Activation('relu', name=prefix + '_sepconv2_act')(x)
            x = SeparableConv2D(728, (3, 3),
                                padding='same',
                                use_bias=False,
                                name=prefix + '_sepconv2')(x)
            x = BatchNormalization(axis=channel_axis,
                                   name=prefix + '_sepconv2_bn')(x)
            x = Activation('relu', name=prefix + '_sepconv3_act')(x)
            x = SeparableConv2D(728, (3, 3),
                                padding='same',
                                use_bias=False,
                                name=prefix + '_sepconv3')(x)
            x = BatchNormalization(axis=channel_axis,
                                   name=prefix + '_sepconv3_bn')(x)

            x = add([x, residual])

        residual = Conv2D(1024, (1, 1),
                          strides=(2, 2),
                          padding='same',
                          use_bias=False)(x)
        residual = BatchNormalization(axis=channel_axis)(residual)

        x = Activation('relu', name='block13_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block13_sepconv1')(x)
        x = BatchNormalization(axis=channel_axis,
                               name='block13_sepconv1_bn')(x)
        x = Activation('relu', name='block13_sepconv2_act')(x)
        x = SeparableConv2D(1024, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block13_sepconv2')(x)
        x = BatchNormalization(axis=channel_axis,
                               name='block13_sepconv2_bn')(x)

        x = MaxPooling2D((3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='block13_pool')(x)
        x = add([x, residual])

        x = SeparableConv2D(1536, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block14_sepconv1')(x)
        x = BatchNormalization(axis=channel_axis,
                               name='block14_sepconv1_bn')(x)
        x = Activation('relu', name='block14_sepconv1_act')(x)

        x = SeparableConv2D(2048, (3, 3),
                            padding='same',
                            use_bias=False,
                            name='block14_sepconv2')(x)
        x = BatchNormalization(axis=channel_axis,
                               name='block14_sepconv2_bn')(x)
        x = Activation('relu', name='block14_sepconv2_act')(x)
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dropout(0.25)(x)
        # softmax classifier
        x = Flatten()(x)
        x = Dense(classes)(x)
        x = Activation("softmax")(x)

        inputs = img_input
        # Create model.
        return Model(inputs, x, name='xception')
Example #18
def Xception(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):
    """Instantiates the Xception architecture.

  Optionally loads weights pre-trained
  on ImageNet. This model is available for TensorFlow only,
  and can only be used with inputs following the TensorFlow
  data format `(width, height, channels)`.
  You should set `image_data_format='channels_last'` in your Keras config
  located at ~/.keras/keras.json.

  Note that the default input image size for this model is 299x299.

  Arguments:
      include_top: whether to include the fully-connected
          layer at the top of the network.
      weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
      input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
          to use as image input for the model.
      input_shape: optional shape tuple, only to be specified
          if `include_top` is False (otherwise the input shape
          has to be `(299, 299, 3)`).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 71.
          E.g. `(150, 150, 3)` would be one valid value.
      pooling: Optional pooling mode for feature extraction
          when `include_top` is `False`.
          - `None` means that the output of the model will be
              the 4D tensor output of the
              last convolutional layer.
          - `avg` means that global average pooling
              will be applied to the output of the
              last convolutional layer, and thus
              the output of the model will be a 2D tensor.
          - `max` means that global max pooling will
              be applied.
      classes: optional number of classes to classify images
          into, only to be specified if `include_top` is True, and
          if no `weights` argument is specified.

  Returns:
      A Keras model instance.

  Raises:
      ValueError: in case of invalid argument for `weights`,
          or invalid input shape.
      RuntimeError: If attempting to run this model with a
          backend that does not support separable convolutions.
  """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    if K.image_data_format() != 'channels_last':
        logging.warning(
            'The Xception model is only available for the '
            'input data format "channels_last" '
            '(width, height, channels). '
            'However your settings specify the default '
            'data format "channels_first" (channels, width, height). '
            'You should set `image_data_format="channels_last"` in your Keras '
            'config located at ~/.keras/keras.json. '
            'The model being returned right now will expect inputs '
            'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=71,
                                      data_format=K.image_data_format(),
                                      require_flatten=False,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(728, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = Conv2D(1024, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)
    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
        x = Flatten(name='custom')(x)  ##DB

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='xception')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels.h5',
                TF_WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
        else:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                TF_WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='b0042744bf5b25fce3cb969f33bebb97')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
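A hedged usage sketch of this variant as a feature extractor:

base = Xception(include_top=False, weights='imagenet',
                input_shape=(299, 299, 3), pooling='avg')
print(base.output_shape)  # (None, 2048) after the custom Flatten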
Example #19
def get_test_model_full():
    """Returns a maximally complex test model,
    using all supported layer types with different parameter combination.
    """
    input_shapes = [
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (27, 29, 1),
        (17, 1),
        (17, 4),
    ]
    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    for inp in inputs[6:8]:
        for padding in ['valid', 'same']:
            for s in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv1D(out_channels,
                                   s,
                                   padding=padding,
                                   dilation_rate=d)(inp))
        for padding_size in range(0, 5):
            outputs.append(ZeroPadding1D(padding_size)(inp))
        for crop_left in range(0, 2):
            for crop_right in range(0, 2):
                outputs.append(Cropping1D((crop_left, crop_right))(inp))
        for upsampling_factor in range(1, 5):
            outputs.append(UpSampling1D(upsampling_factor)(inp))
        for padding in ['valid', 'same']:
            for pool_factor in range(1, 6):
                for s in range(1, 4):
                    outputs.append(
                        MaxPooling1D(pool_factor, strides=s,
                                     padding=padding)(inp))
                    outputs.append(
                        AveragePooling1D(pool_factor,
                                         strides=s,
                                         padding=padding)(inp))
        outputs.append(GlobalMaxPooling1D()(inp))
        outputs.append(GlobalAveragePooling1D()(inp))

    for inp in [inputs[0], inputs[5]]:
        for padding in ['valid', 'same']:
            for h in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   padding=padding,
                                   dilation_rate=(d, 1))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            padding=padding,
                                            dilation_rate=(d, 1))(inp))
                    for sy in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   strides=(1, sy),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            strides=(sy, sy),
                                            padding=padding)(inp))
                for sy in range(1, 4):
                    outputs.append(
                        MaxPooling2D((h, 1), strides=(1, sy),
                                     padding=padding)(inp))
            for w in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4) if sy == 1 else [1]:
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   padding=padding,
                                   dilation_rate=(1, d))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            padding=padding,
                                            dilation_rate=(1, d))(inp))
                    for sx in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   strides=(sx, 1),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            strides=(sx, sx),
                                            padding=padding)(inp))
                for sx in range(1, 4):
                    outputs.append(
                        MaxPooling2D((1, w), strides=(1, sx),
                                     padding=padding)(inp))
    outputs.append(ZeroPadding2D(2)(inputs[0]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[0]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[0]))
    outputs.append(Cropping2D(2)(inputs[0]))
    outputs.append(Cropping2D((2, 3))(inputs[0]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[0]))
    for y in range(1, 3):
        for x in range(1, 3):
            outputs.append(UpSampling2D(size=(y, x))(inputs[0]))
    outputs.append(GlobalAveragePooling2D()(inputs[0]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(AveragePooling2D((2, 2))(inputs[0]))
    outputs.append(MaxPooling2D((2, 2))(inputs[0]))
    outputs.append(UpSampling2D((2, 2))(inputs[0]))
    outputs.append(keras.layers.concatenate([inputs[0], inputs[0]]))
    outputs.append(Dropout(0.5)(inputs[0]))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))

    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[0]))

    outputs.append(Dense(2, use_bias=True)(inputs[3]))
    outputs.append(Dense(2, use_bias=False)(inputs[3]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[1]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[2]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[2]))  # (1, 8, 8)
    x = keras.layers.concatenate([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = keras.layers.concatenate(
        [MaxPooling2D((2, 2))(x),
         AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[3]),
        Activation('hard_sigmoid')(inputs[3]),
        Activation('selu')(inputs[3]),
        Activation('sigmoid')(inputs[3]),
        Activation('softplus')(inputs[3]),
        Activation('softmax')(inputs[3]),
        Activation('relu')(inputs[3]),
        LeakyReLU()(inputs[3]),
        ELU()(inputs[3]),
        shared_activation(inputs[3]),
        inputs[4],
        inputs[1],
        x,
        shared_activation(x),
    ]

    print('Model has {} outputs.'.format(len(outputs)))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_full')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    batch_size = 1
    epochs = 10
    data_in = generate_input_data(training_data_size, input_shapes)
    data_out = generate_output_data(training_data_size, outputs)
    model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size)
    return model
Example #20
def EEGNet_SSVEP(nb_classes,
                 Chans=64,
                 Samples=128,
                 regRate=0.0001,
                 dropoutRate=0.25,
                 kernLength=64,
                 numFilters=8):
    """ Keras Implementation of the variant of EEGNet that was used to classify
    signals from an SSVEP task (https://arxiv.org/abs/1803.04566)

       
    Inputs:
        
        nb_classes     : int, number of classes to classify
        Chans, Samples : number of channels and time points in the EEG data
        regRate        : regularization parameter for L1 and L2 penalties
        dropoutRate    : dropout fraction
        kernLength     : length of temporal convolution in first layer
        numFilters     : number of temporal-spatial filter pairs to learn
    
    """

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    layer1 = Conv2D(numFilters, (1, kernLength),
                    padding='same',
                    kernel_regularizer=l1_l2(l1=0.0, l2=0.0),
                    input_shape=(1, Chans, Samples),
                    use_bias=False)(input1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = DepthwiseConv2D((Chans, 1),
                             depthwise_regularizer=l1_l2(l1=regRate,
                                                         l2=regRate),
                             use_bias=False)(layer1)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = Activation('elu')(layer1)
    layer1 = SpatialDropout2D(dropoutRate)(layer1)

    layer2 = SeparableConv2D(numFilters, (1, 8),
                             depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                             use_bias=False,
                             padding='same')(layer1)
    layer2 = BatchNormalization(axis=1)(layer2)
    layer2 = Activation('elu')(layer2)
    layer2 = AveragePooling2D((1, 4))(layer2)
    layer2 = SpatialDropout2D(dropoutRate)(layer2)

    layer3 = SeparableConv2D(numFilters * 2, (1, 8),
                             depth_multiplier=2,
                             depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                             use_bias=False,
                             padding='same')(layer2)
    layer3 = BatchNormalization(axis=1)(layer3)
    layer3 = Activation('elu')(layer3)
    layer3 = AveragePooling2D((1, 4))(layer3)
    layer3 = SpatialDropout2D(dropoutRate)(layer3)

    flatten = Flatten(name='flatten')(layer3)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
Example #21
def xception(include_top=True,
             weights="imagenet",
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):

    input_shape = _obtain_input_shape(input_shape=input_shape,
                                      default_size=299,
                                      min_size=71,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(shape=input_shape, tensor=input_tensor)
        else:
            img_input = input_tensor

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    # Block 1
    x = Conv2D(32, (3, 3),
               strides=(2, 2),
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_norm),
               name="conv1_1")(img_input)
    x = BatchNormalization(axis=bn_axis, name="conv1_1_bn")(x)
    x = Activation('relu')(x)
    x = Conv2D(64, (3, 3),
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_norm),
               name="conv1_2")(x)
    x = BatchNormalization(axis=bn_axis, name="conv1_2_bn")(x)
    x = Activation('relu')(x)

    # Block 2
    x = residual_separable(x, [128, 128, 128], block=2, bn_axis=bn_axis)

    # Block 3
    x = residual_separable(x, [256, 256, 256], block=3, bn_axis=bn_axis)

    # Block 4
    x = residual_separable(x, [728, 728, 728], block=4, bn_axis=bn_axis)

    # Block 5->12
    for i in range(5, 13, 1):
        x = middle_flow(x, 728, block=i, bn_axis=bn_axis)

    # Block 13
    x = residual_separable(x, [1024, 728, 1024], block=13, bn_axis=bn_axis)

    # Block14

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(l2_norm),
                        name="conv14_1")(x)
    x = BatchNormalization(axis=bn_axis, name="conv14_1_bn")(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(l2_norm),
                        name="conv14_2")(x)
    x = BatchNormalization(axis=bn_axis, name="conv14_2_bn")(x)
    x = Activation('relu', name="pool5")(x)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dense(classes, activation='softmax', name='classifier')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='xception')
    model.summary()

    return model
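This builder reuses middle_flow (Example #1) and residual_separable (Example #12) from the same module; a hedged driver, with l2_norm again assumed to be module-level:

model = xception(include_top=True, weights=None, classes=1000)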
Example #22
def atrous_spatial_pyramid_pooling(input_layer,
                                   global_image_pooling_upsampling_factor=None
                                   ):
    # branch: 1x1 conv
    b_aspp_0 = _Conv2D(input_layer,
                       filters=256,
                       kernel_size=1,
                       name='aspp_0_conv',
                       bn_epsilon=1e-5)

    # branch: 3x3 conv, rate 6
    b_aspp_1 = SeparableConv2D(filters=256,
                               kernel_size=3,
                               padding='same',
                               dilation_rate=6,
                               use_bias=False,
                               name='aspp_1_sepconv')(input_layer)
    b_aspp_1 = BatchNormalization(name='aspp_1_sepconv_bn',
                                  epsilon=1e-5)(b_aspp_1)
    b_aspp_1 = ReLU()(b_aspp_1)

    # branch: 3x3 conv, rate 12
    b_aspp_2 = SeparableConv2D(filters=256,
                               kernel_size=3,
                               padding='same',
                               dilation_rate=12,
                               use_bias=False,
                               name='aspp_2_sepconv')(input_layer)
    b_aspp_2 = BatchNormalization(name='aspp_2_sepconv_bn',
                                  epsilon=1e-5)(b_aspp_2)
    b_aspp_2 = ReLU()(b_aspp_2)

    # branch: 3x3 conv, rate 18
    b_aspp_3 = SeparableConv2D(filters=256,
                               kernel_size=3,
                               padding='same',
                               dilation_rate=18,
                               use_bias=False,
                               name='aspp_3_sepconv')(input_layer)
    b_aspp_3 = BatchNormalization(name='aspp_3_sepconv_bn',
                                  epsilon=1e-5)(b_aspp_3)
    b_aspp_3 = ReLU()(b_aspp_3)

    if global_image_pooling_upsampling_factor is None:
        output_layer = Concatenate()([b_aspp_0, b_aspp_1, b_aspp_2, b_aspp_3])

    else:
        # branch: global image pooling
        b_image_pooling = GlobalAveragePooling2D(
            name='pyramid_img_pool')(input_layer)
        b_image_pooling = Lambda(
            lambda x: K.expand_dims(K.expand_dims(x, 1), 1))(
                b_image_pooling
            )  # (batch size x channels)->(batch size x 1 x 1 x channels)
        b_image_pooling = Conv2D(filters=256,
                                 kernel_size=1,
                                 padding='same',
                                 use_bias=False,
                                 name='pyramid_img_pool_conv')(b_image_pooling)
        b_image_pooling = BatchNormalization(
            name='pyramid_img_pool_conv_bn')(b_image_pooling)
        b_image_pooling = ReLU()(b_image_pooling)
        b_image_pooling = UpSampling2D(
            global_image_pooling_upsampling_factor,
            interpolation='bilinear')(b_image_pooling)

        output_layer = Concatenate()(
            [b_aspp_0, b_aspp_1, b_aspp_2, b_aspp_3, b_image_pooling])

    return output_layer
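A minimal usage sketch of the ASPP block above (assumptions: the backbone runs at output stride 16, so a 512x512 input yields a 32x32 feature map, and the Keras imports and the _Conv2D helper from the surrounding examples are in scope):

from tensorflow.keras.layers import Input

features = Input(shape=(32, 32, 2048))  # hypothetical 1/16-resolution feature map
aspp_out = atrous_spatial_pyramid_pooling(
    features, global_image_pooling_upsampling_factor=(32, 32))
# aspp_out concatenates five 256-channel branches -> 1280 channels total;
# the 1x1 global-pooling branch is upsampled by 32 to match the 32x32 grid.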
Example #23
def create_network(input_resolution, n_classes=34):
    input_layer = Input(shape=(*input_resolution, 3))
    # input_layer = Input(shape=(None, None, 3))

    # entry flow
    x = _Conv2D(input_layer,
                filters=32,
                kernel_size=3,
                stride=2,
                name='ef_conv32')

    x = _Conv2D(x, filters=64, kernel_size=3, stride=1, name='ef_conv64')

    x = xception_block(x,
                       filter=128,
                       last_stride=2,
                       last_rate=1,
                       name='ef_x_block_1',
                       residual_type='conv')

    x, skip = xception_block(x,
                             filter=256,
                             last_stride=2,
                             last_rate=1,
                             name='ef_x_block_2',
                             residual_type='conv',
                             return_skip=True)

    x = xception_block(x,
                       filter=728,
                       last_stride=2,
                       last_rate=1,
                       name='ef_x_block_3',
                       residual_type='conv')

    # middle flow
    for i in range(16):
        x = xception_block(x,
                           filter=728,
                           last_stride=1,
                           last_rate=1,
                           name='mf_x_block_{}'.format(i + 1),
                           residual_type='add')

    # exit flow
    x = xception_block(x, [728, 1024, 1024],
                       last_stride=1,
                       last_rate=1,
                       name='xf_x_block_1',
                       residual_type='conv')

    x = xception_block(x, [1536, 1536, 2048],
                       last_stride=1,
                       last_rate=1,
                       name='xf_x_block_2',
                       residual_type='none')

    # atrous spatial pyramid pooling
    if None in input_resolution:  # input resolution not defined --> no global pooling (fully convolutional)
        x = atrous_spatial_pyramid_pooling(x)

    else:  # using global pooling
        global_image_pooling_upsampling_factor = tuple(
            i // 16 for i in input_resolution)  # UpSampling2D expects integer factors
        x = atrous_spatial_pyramid_pooling(
            x,
            global_image_pooling_upsampling_factor=global_image_pooling_upsampling_factor)

    # 1x1 conv after aspp
    x = _Conv2D(x, filters=256, kernel_size=1, name='1x1conv_after_aspp')
    x = Dropout(0.1)(x)

    # upsampling by 4
    x = UpSampling2D(size=4, interpolation='bilinear')(x)

    # reducing hypercolumn channels
    skip = _Conv2D(skip, filters=48, kernel_size=1, name='hypercolumn_conv48')

    # concat hypercolumn and high-level features
    x = Concatenate()([skip, x])

    # 2x sepConv 3x3
    x = SeparableConv2D(filters=256,
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        name='decoder_sepconv_1')(x)
    x = BatchNormalization(name='decoder_sepconv_1_bn', epsilon=1e-5)(x)
    x = ReLU()(x)

    x = SeparableConv2D(filters=256,
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        name='decoder_sepconv_2')(x)
    x = BatchNormalization(name='decoder_sepconv_2_bn', epsilon=1e-5)(x)
    x = ReLU()(x)

    # 1x1 conv reducing channels to class number
    x = Conv2D(filters=n_classes,
               kernel_size=1,
               padding='same',
               name='reduce_channels')(x)

    # upsampling by 4
    x = UpSampling2D(size=4, interpolation='bilinear')(x)

    # activation
    x = Activation('softmax')(x)

    model = Model(inputs=input_layer, outputs=x)

    return model
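A quick way to instantiate the network above (assumption: 34 classes follows the Cityscapes label set the default hints at; any resolution divisible by 16 works with the global-pooling branch):

model = create_network((512, 1024), n_classes=34)
model.summary()
# Passing (None, None) instead builds the fully convolutional variant,
# which skips the global image pooling branch in the ASPP module.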
Example #24
def miniXception(input_shape, num_classes):

    # Base Module
    img_input = Input(input_shape)
    x = Conv2D(8, (3, 3),
               strides=(1, 1),
               kernel_regularizer=l2(0.01),
               use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(8, (3, 3),
               strides=(1, 1),
               kernel_regularizer=l2(0.01),
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Residual Module 1
    residual = Conv2D(16, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(16, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(16, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    x = layers.add([x, residual])

    # Residual Module 2
    residual = Conv2D(32, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(32, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(32, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    x = layers.add([x, residual])

    # Residual Module 3
    residual = Conv2D(64, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(64, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(64, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    x = layers.add([x, residual])

    # Residual Module 4
    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.01),
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    x = layers.add([x, residual])

    # Output Module
    x = Conv2D(num_classes, (3, 3), padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model
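A minimal usage sketch (assumption: the 48x48 grayscale, 7-class configuration is the typical FER-2013 emotion-recognition setup for this mini-Xception; it is not stated in the code itself):

model = miniXception(input_shape=(48, 48, 1), num_classes=7)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])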
Example #25
def EEGNet(nb_classes, Chans = 64, Samples = 128, 
             dropoutRate = 0.5, kernLength = 64, F1 = 8, 
             D = 2, F2 = 16, norm_rate = 0.25, dropoutType = 'Dropout'):
    """ Keras Implementation of EEGNet
    http://iopscience.iop.org/article/10.1088/1741-2552/aace8c/meta
    Note that this implements the newest version of EEGNet and NOT the earlier
    version (version v1 and v2 on arxiv). We strongly recommend using this
    architecture as it performs much better and has nicer properties than
    our earlier version. For example:
        
        1. Depthwise Convolutions to learn spatial filters within a 
        temporal convolution. The use of the depth_multiplier option maps 
        exactly to the number of spatial filters learned within a temporal
        filter. This matches the setup of algorithms like FBCSP which learn 
        spatial filters within each filter in a filter-bank. This also limits 
        the number of free parameters to fit when compared to a fully-connected
        convolution. 
        
        2. Separable Convolutions to learn how to optimally combine spatial
        filters across temporal bands. Separable Convolutions are Depthwise
        Convolutions followed by (1x1) Pointwise Convolutions. 
        
    
    While the original paper used Dropout, we found that SpatialDropout2D 
    sometimes produced slightly better results for classification of ERP 
    signals. However, SpatialDropout2D significantly reduced performance 
    on the Oscillatory dataset (SMR, BCI-IV Dataset 2A). We recommend using
    the default Dropout in most cases.
        
    Assumes the input signal is sampled at 128Hz. If you want to use this model
    for any other sampling rate you will need to modify the lengths of temporal
    kernels and average pooling size in blocks 1 and 2 as needed (double the 
    kernel lengths for double the sampling rate, etc). Note that we haven't 
    tested the model performance with this rule so this may not work well. 
    
    The model with default parameters gives the EEGNet-8,2 model as discussed
    in the paper. This model should do pretty well in general, although it is
    advised to do some model searching to get optimal performance on your
    particular dataset.
    We set F2 = F1 * D (number of input filters = number of output filters) for
    the SeparableConv2D layer. We haven't extensively tested other values of this
    parameter (say, F2 < F1 * D for compressed learning, and F2 > F1 * D for
    overcomplete). We believe the main parameters to focus on are F1 and D. 
    Inputs:
        
      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer. We found
                        that setting this to be half the sampling rate worked
                        well in practice. For the SMR dataset in particular
                        since the data was high-passed at 4Hz we used a kernel
                        length of 32.     
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn. Default: F1 = 8, F2 = F1 * D. 
      D               : number of spatial filters to learn within each temporal
                        convolution. Default: D = 2
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.
    """
    
    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    
    input1   = Input(shape = (1, Chans, Samples))

    ##################################################################
    block1       = Conv2D(F1, (1, kernLength), padding = 'same',
                                   input_shape = (1, Chans, Samples),
                                   use_bias = False)(input1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = DepthwiseConv2D((Chans, 1), use_bias = False, 
                                   depth_multiplier = D,
                                   depthwise_constraint = max_norm(1.))(block1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = Activation('elu')(block1)
    block1       = AveragePooling2D((1, 4))(block1)
    block1       = dropoutType(dropoutRate)(block1)
    
    block2       = SeparableConv2D(F2, (1, 16),
                                   use_bias = False, padding = 'same')(block1)
    block2       = BatchNormalization(axis = 1)(block2)
    block2       = Activation('elu')(block2)
    block2       = AveragePooling2D((1, 8))(block2)
    block2       = dropoutType(dropoutRate)(block2)
        
    flatten      = Flatten(name = 'flatten')(block2)
    
    dense        = Dense(nb_classes, name = 'dense', 
                         kernel_constraint = max_norm(norm_rate))(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input1, outputs=softmax)
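A minimal usage sketch. One assumption worth flagging: the (1, Chans, Samples) input shape and BatchNormalization(axis=1) imply a channels-first data layout, so the backend should be configured accordingly:

from tensorflow.keras import backend as K
K.set_image_data_format('channels_first')

model = EEGNet(nb_classes=4)  # defaults give the EEGNet-8,2 model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Training data X should then have shape (trials, 1, 64, 128),
# with one-hot labels y of 4 classes.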
Example #26
def ConvBlock(model, layers, filters):
    for i in range(layers):
        model.add(Conv2D(filters, (3, 3), activation='selu'))
        model.add(SeparableConv2D(filters, (3, 3), activation='selu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
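
A minimal sketch of the Sequential helper above (assumption: a model with an input layer already attached, since ConvBlock only appends layers):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer

model = Sequential()
model.add(InputLayer(input_shape=(96, 96, 3)))
ConvBlock(model, layers=1, filters=32)  # Conv -> SepConv -> BN -> MaxPool
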
def build_model(input_shape=None):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=im_size,
                                      min_size=24,
                                      data_format=K.image_data_format(),
                                      require_flatten=False,
                                      weights=None)

    img_input = Input(shape=input_shape)
    reg = regularizers.l2(0.001)

    # first block
    x = Conv2D(32, (3, 3),
               strides=(2, 2),
               use_bias=False,
               name='block1_conv1',
               kernel_regularizer=reg)(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)

    x = Conv2D(64, (3, 3),
               use_bias=False,
               name='block1_conv2',
               kernel_regularizer=reg)(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False,
                      name='residual_conv2d_1',
                      kernel_regularizer=reg)(x)
    residual = BatchNormalization()(residual)

    # second block
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1',
                        kernel_regularizer=reg)(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2',
                        kernel_regularizer=reg)(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = layers.add([x, residual])

    # The middle blocks below use identity shortcuts (residual = x), so no
    # strided projection is needed here.

    for i in range(2):
        residual = x
        prefix = 'block' + str(i + 3)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(256, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1',
                            kernel_regularizer=reg)(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(256, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2',
                            kernel_regularizer=reg)(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(256, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3',
                            kernel_regularizer=reg)(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = Conv2D(384, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    # output blocks - block 21
    x = Activation('relu', name='block21_sepconv1_act')(x)
    x = SeparableConv2D(384, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block21_sepconv1',
                        kernel_regularizer=reg)(x)
    x = BatchNormalization(name='block21_sepconv1_bn')(x)
    x = Activation('relu', name='block21_sepconv2_act')(x)
    x = SeparableConv2D(384, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block21_sepconv2',
                        kernel_regularizer=reg)(x)
    x = BatchNormalization(name='block21_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block21_pool')(x)
    x = layers.add([x, residual])

    # block 22
    x = SeparableConv2D(512, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block22_sepconv1',
                        kernel_regularizer=reg)(x)
    x = BatchNormalization(name='block22_sepconv1_bn')(x)
    x = Activation('relu', name='block22_sepconv1_act')(x)

    x = SeparableConv2D(768, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block22_sepconv2',
                        kernel_regularizer=reg)(x)
    x = BatchNormalization(name='block22_sepconv2_bn')(x)
    x = Activation('relu', name='block22_sepconv2_act')(x)

    # model finish
    x = GlobalMaxPooling2D()(x)  # already flat: (batch, channels), so no Flatten needed
    x = Dense(256, activation='relu', kernel_regularizer=reg)(x)
    x = Dense(64, activation='relu', kernel_regularizer=reg)(x)
    x = Dense(num_classes, activation='softmax')(x)

    model = Model(img_input, x, name='micro_xception_bn_v1')
    opt = optimizers.Adam(lr=0.0008, decay=0.001)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
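A minimal sketch of calling the builder (assumption: im_size and num_classes are module-level globals, as the function body implies; the values below are hypothetical):

im_size, num_classes = 128, 10
model = build_model(input_shape=(im_size, im_size, 3))
model.summary()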
Example #28
def create_squeezenet_ssd_lite(num_classes, is_test=False):
    base_net = squeezenet1_1(False).features  # disable dropout layer

    source_layer_indexes = [
        12
    ]
    extras = [
        [
            Conv2D(filters=256, kernel_size=1, activation='relu'),
            SeparableConv2D(filters=512, kernel_size=3, strides=2, padding='same'),
        ],
        [
            Conv2D(filters=256, kernel_size=1, activation='relu'),
            SeparableConv2D(filters=512, kernel_size=3, strides=2, padding='same'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, activation='relu'),
            SeparableConv2D(filters=256, kernel_size=3, strides=2, padding='same'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, activation='relu'),
            SeparableConv2D(filters=256, kernel_size=3, strides=2, padding='same'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, activation='relu'),
            SeparableConv2D(filters=256, kernel_size=3, strides=2, padding='same')
        ]
    ]

    regression_headers = [
        SeparableConv2D(filters=6 * 4, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * 4, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * 4, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * 4, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * 4, kernel_size=3, padding='same'),
        Conv2D(filters=6 * 4, kernel_size=1)
    ]

    classification_headers = [
        SeparableConv2D(filters=6 * num_classes, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * num_classes, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * num_classes, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * num_classes, kernel_size=3, padding='same'),
        SeparableConv2D(filters=6 * num_classes, kernel_size=3, padding='same'),
        Conv2D(filters=6 * num_classes, kernel_size=1),
    ]

    return SSD(num_classes, base_net, source_layer_indexes,
               extras, classification_headers, regression_headers, is_test=is_test, config=config)
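A sketch of how the header outputs are typically interpreted (assumption: 6 anchor shapes per cell, as filters=6 * 4 and filters=6 * num_classes suggest; decode_header is a hypothetical helper, not part of the SSD code above):

import tensorflow as tf

def decode_header(feature_map, values_per_anchor):
    # (batch, H, W, 6 * v) -> (batch, H * W * 6, v): one row per anchor box
    batch = tf.shape(feature_map)[0]
    return tf.reshape(feature_map, [batch, -1, values_per_anchor])

# boxes  = decode_header(regression_output, 4)          # box offsets
# scores = decode_header(classification_output, num_classes)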
Example #29
def decoder_convolutional_block(net,
                                f,
                                filters,
                                stage,
                                block,
                                s=2,
                                dropout_rate=DROPOUT_RATE):
    # Defining name basis
    conv_name_base = 'dec_res' + str(stage) + block + '_branch'
    bn_name_base = 'dec_bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    f1, f2, f3 = filters

    # Save the input value
    net_shortcut = net

    #############
    # MAIN PATH #
    #############
    # First component of main path
    net = ConvSN2D(filters=f1,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same',
                   name=conv_name_base + '2a',
                   kernel_initializer=glorot_uniform(seed=0))(net)
    net = BatchNormalization(axis=-1, name=bn_name_base + '2a')(net)
    net = SwishLayer()(net)

    # Second component of main path
    net = TimeDistributed(Dropout(dropout_rate))(net)
    net = ConvSN2DTranspose(filters=f2,
                            kernel_size=(f, f),
                            strides=(s, s),
                            padding='same',
                            name=conv_name_base + '2b',
                            kernel_initializer=glorot_uniform(seed=0))(net)
    net = BatchNormalization(axis=-1, name=bn_name_base + '2b')(net)
    net = SwishLayer()(net)

    # Third component of main path
    net = TimeDistributed(Dropout(dropout_rate))(net)
    net = ConvSN2D(filters=f3,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same',
                   name=conv_name_base + '2c',
                   kernel_initializer=glorot_uniform(seed=0))(net)
    net = BatchNormalization(axis=-1, name=bn_name_base + '2c')(net)

    #################
    # SHORTCUT PATH #
    #################
    # net_shortcut = TimeDistributed(ConvSN2D(filters=f3, kernel_size=(1, 1), strides=(s, s),
    #                                       padding='valid', name=conv_name_base + '1',
    #                                       kernel_initializer=glorot_uniform(seed=0)))(net_shortcut)
    # net_shortcut = BatchNormalization(axis=-1, name=bn_name_base + '1')(net_shortcut)

    # NVAE-style residual shortcut
    net_shortcut = BatchNormalization()(net_shortcut)
    net_shortcut = ConvSN2D(filters=f3,
                            kernel_size=1,
                            name=conv_name_base + "1a",
                            use_bias=False,
                            data_format='channels_last',
                            padding='same')(net_shortcut)
    net_shortcut = BatchNormalization()(net_shortcut)
    net_shortcut = SwishLayer()(net_shortcut)
    net_shortcut = SeparableConv2D(filters=f3,
                                   kernel_size=5,
                                   name=conv_name_base + "1b",
                                   use_bias=False,
                                   data_format='channels_last',
                                   padding='same')(net_shortcut)
    net_shortcut = BatchNormalization()(net_shortcut)
    net_shortcut = SwishLayer()(net_shortcut)
    net_shortcut = ConvSN2DTranspose(filters=f3,
                                     kernel_size=(3, 3),
                                     strides=(s, s),
                                     use_bias=False,
                                     data_format='channels_last',
                                     name=conv_name_base + "1c",
                                     padding='same')(net_shortcut)
    net_shortcut = BatchNormalization()(net_shortcut)
    net_shortcut = SELayer(depth=f3)(net_shortcut)

    # Final step: add the shortcut to the main path and apply a Swish activation
    net = Add()([net, net_shortcut])
    net = SwishLayer()(net)

    return net
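A minimal usage sketch (assumptions: the custom ConvSN2D, ConvSN2DTranspose, SwishLayer, and SELayer layers are importable, and DROPOUT_RATE is a module constant as the signature suggests):

from tensorflow.keras.layers import Input

inputs = Input(shape=(16, 16, 256))  # hypothetical decoder feature map
out = decoder_convolutional_block(inputs, f=3, filters=(64, 64, 128),
                                  stage=2, block='a', s=2)
# With s=2 both the main path and the shortcut upsample 16x16 -> 32x32
# via their strided transposed convs, so the final Add() sees matching shapes.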