Example #1
def projection_block(x, n_filters):
    """ Create a residual block using Depthwise Separable Convolutions with Projection shortcut
        x        : input into residual block
        n_filters: number of filters
    """
    # Remember the input
    shortcut = x

    # Strided convolution to double number of filters in identity link to
    # match output of residual block for the add operation (projection shortcut)
    shortcut = layers.Conv2D(n_filters, (1, 1), strides=(2, 2),
                             padding='same')(shortcut)
    shortcut = layers.BatchNormalization()(shortcut)

    # First Depthwise Separable Convolution
    x = layers.SeparableConv2D(n_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Second Depthwise Separable Convolution
    x = layers.SeparableConv2D(n_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create pooled feature maps, reduce size by 75%
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add the projection shortcut to the output of the block
    x = layers.add([x, shortcut])

    return x
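A minimal usage sketch; the 32x32x64 input and 128-filter count here are assumptions, not from the source:

from tensorflow import keras
from tensorflow.keras import layers

inputs = keras.Input(shape=(32, 32, 64))           # assumed input tensor
outputs = projection_block(inputs, n_filters=128)  # -> (16, 16, 128)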
Example #2
def inverted_bottleneck(inputs,
                        up_channel_rate,
                        channels,
                        subsample,
                        k_s=3,
                        scope=""):
    if inputs.shape[-1] == channels:
        origin_inputs = inputs
    else:
        origin_inputs = layers.SeparableConv2D(channels,
                                               kernel_size=1,
                                               activation='relu',
                                               padding="same")(inputs)

    tower = layers.Conv2D(filters=channels // 2,
                          kernel_size=(1, 1),
                          activation='relu',
                          padding='same')(inputs)
    tower = layers.BatchNormalization()(tower)

    tower = layers.SeparableConv2D(filters=channels // 2,
                                   kernel_size=k_s,
                                   activation='relu',
                                   padding='same')(tower)
    tower = layers.BatchNormalization()(tower)

    tower = layers.Conv2D(filters=channels,
                          kernel_size=(1, 1),
                          activation='relu',
                          padding='same')(tower)
    tower = layers.BatchNormalization()(tower)

    output = layers.Add()([origin_inputs, tower])

    return output
Example #3
def residual_block_entry(x, nb_filters):
    """ Create a residual block using Depthwise Separable Convolutions
        x         : input into residual block
        nb_filters: number of filters
    """
    shortcut = x

    # First Depthwise Separable Convolution
    x = layers.SeparableConv2D(nb_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Second Depthwise Separable Convolution
    x = layers.SeparableConv2D(nb_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create pooled feature maps, reduce size by 75%
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add strided convolution to identity link to double number of filters to
    # match output of residual block for the add operation
    shortcut = layers.Conv2D(nb_filters, (1, 1),
                             strides=(2, 2),
                             padding='same')(shortcut)
    shortcut = layers.BatchNormalization()(shortcut)

    x = layers.add([x, shortcut])

    return x
Example #4
def bottleneck(input, output_channels):
    # input: (x, x, dim)
    if input.shape[-1] == output_channels:
        _skip = input  # (x, x, output_channels)
    else:
        _skip = layers.SeparableConv2D(output_channels,
                                       kernel_size=1,
                                       activation='relu',
                                       padding='same')(
                                           input)  # (x, x, output_channels)

    _x = layers.SeparableConv2D(output_channels // 2,
                                kernel_size=1,
                                activation='relu',
                                padding='same')(
                                    input)  # (x, x, output_channels // 2)
    _x = layers.BatchNormalization()(_x)  # (x, x, output_channels // 2)

    _x = layers.SeparableConv2D(output_channels // 2,
                                kernel_size=3,
                                activation='relu',
                                padding='same')(
                                    _x)  # (x, x, output_channels // 2)
    _x = layers.BatchNormalization()(_x)  # (x, x, output_channels // 2)

    _x = layers.SeparableConv2D(output_channels,
                                kernel_size=1,
                                activation='relu',
                                padding='same')(_x)  # (x, x, output_channels)
    _x = layers.BatchNormalization()(_x)  # (x, x, output_channels)

    output = layers.Add()([_skip, _x])  # (x, x, output_channels)

    return output
Example #5
def create_mnist_f_model(dropout, drop_rate):
    model = keras.models.Sequential()
    input_shape = (IH, IW, IZ)
    if RANDOM_CROPS:
        model.add(
            layers.experimental.preprocessing.RandomCrop(
                IH, IW, input_shape=input_shape))
        model.add(layers.ZeroPadding2D(padding=(1, 1)))
    else:
        model.add(layers.ZeroPadding2D(padding=(1, 1),
                                       input_shape=input_shape))
    model.add(layers.ZeroPadding2D(padding=(1, 1)))  # second round of padding
    model.add(layers.Conv2D(28, kernel_size=(2, 2), activation=relu))
    if dropout:
        model.add(layers.Dropout(drop_rate))
    model.add(layers.SeparableConv2D(36, kernel_size=(2, 2), activation=relu))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    if dropout:
        model.add(layers.Dropout(drop_rate))
    model.add(layers.SeparableConv2D(42, kernel_size=(2, 2), activation=relu))
    if dropout:
        model.add(layers.Dropout(drop_rate))
    model.add(layers.Flatten())
    model.add(layers.Dense(768, activation=relu))
    if dropout:
        model.add(layers.Dropout(drop_rate))
    model.add(layers.Dense(512, activation=relu))
    model.add(layers.Dense(NUM_CLASSES, activation=tf.nn.softmax))
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy,
                  metrics=['accuracy'])
    return model
Example #6
def build(input_shape, classes):
    inputs = tf.keras.Input(shape=input_shape)
    a = layers.Conv2D(16, (7, 7), activation='relu', padding="same")(inputs)
    a = layers.BatchNormalization()(a)

    a = layers.Conv2D(16, (7, 7), activation='relu', padding="same")(a)
    a = layers.BatchNormalization()(a)

    b = layers.SeparableConv2D(32, (5, 5), activation='relu',
                               padding="same")(a)
    b = layers.BatchNormalization()(b)

    b = layers.SeparableConv2D(32, (5, 5), activation='relu',
                               padding="same")(b)
    b = layers.BatchNormalization()(b)

    b = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(b)

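    # The unpadded 25x25 conv shrinks `a` to match the pooled branch `b`;
    # the shapes only line up for certain input sizes (e.g. 48x48 inputs).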
    a = layers.Conv2D(32, (25, 25), activation='relu')(a)

    c = tf.add(a, b)
    c = layers.Conv2D(44, (3, 3), activation='relu', padding="same")(c)
    c = layers.GlobalAveragePooling2D()(c)

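    # Flatten is effectively a no-op here: GlobalAveragePooling2D already
    # returns a rank-2 tensor.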
    c = layers.Flatten()(c)

    pred = layers.Dense(classes, activation='softmax')(c)

    model = tf.keras.Model(inputs=inputs, outputs=pred)
    return model
Example #7
def make_model(input_shape, num_classes, data_augmentation):
    """
    ## Build a model
    We'll build a small version of the Xception network. We haven't particularly tried to
    optimize the architecture; if you want to do a systematic search for the best model
     configuration, consider using
    [Keras Tuner](https://github.com/keras-team/keras-tuner).
    Note that:
    - We start the model with the `data_augmentation` preprocessor, followed by a
     `Rescaling` layer.
    - We include a `Dropout` layer before the final classification layer.
    """
    inputs = keras.Input(shape=input_shape)
    # Image augmentation block
    x = data_augmentation(inputs)

    # Entry block
    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(size, 1, strides=2,
                                 padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs)
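A usage sketch; the augmentation pipeline and 180x180 RGB input size below follow the Keras tutorial this is adapted from, but are assumptions here:

data_augmentation = keras.Sequential([
    layers.experimental.preprocessing.RandomFlip("horizontal"),
    layers.experimental.preprocessing.RandomRotation(0.1),
])
model = make_model(input_shape=(180, 180, 3), num_classes=2,
                   data_augmentation=data_augmentation)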
Example #8
def double_blaze_block(x: tf.Tensor, filters, mid_channels=None, stride=1):
    assert stride in [1, 2]
    inp_channels = x.get_shape()[3]
    mid_channels = mid_channels or inp_channels
    usepool = stride > 1

    conv1 = layers.SeparableConv2D(filters=mid_channels,
                                   kernel_size=5,
                                   strides=stride,
                                   padding='same')(x)
    bn1 = layers.BatchNormalization()(conv1)
    relu1 = tf.nn.relu(bn1)

    conv2 = layers.SeparableConv2D(filters=filters,
                                   kernel_size=5,
                                   strides=1,
                                   padding='same')(relu1)
    bn2 = layers.BatchNormalization()(conv2)

    if usepool:
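        # Zero-pad the pooled shortcut's channels (at the front) so it matches
        # `filters` for the residual add; this assumes filters >= inp_channels.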
        channel_padding_dim = filters - inp_channels
        max_pool1 = layers.MaxPooling2D(pool_size=stride,
                                        strides=stride,
                                        padding='same')(x)
        x = tf.pad(max_pool1,
                   [[0, 0], [0, 0], [0, 0], [channel_padding_dim, 0]],
                   mode='CONSTANT')
    return tf.nn.relu(bn2 + x)
Example #9
def get_model(img_size, num_classes):
    inputs = keras.Input(shape=img_size + (3, ))

    ### [First half of the network: downsampling inputs] ###

    # Entry block
    x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [64, 128, 256]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2,
                                 padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###

    previous_block_activation = x  # Set aside residual

    for filters in [256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.UpSampling2D(2)(x)

        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes,
                            3,
                            activation="softmax",
                            padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model
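A usage sketch; the 160x160 image size and 3 classes match the Keras segmentation example this resembles, but are assumptions here:

model = get_model(img_size=(160, 160), num_classes=3)
model.summary()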
Example #10
def _sep(x: Tensor,
         num_filters: int,
         kernel_size: int,
         name: str,
         strides: int = 1) -> Tensor:
    x = layers.ReLU(name=name + '_relu1')(x)
    x = layers.SeparableConv2D(num_filters,
                               kernel_size,
                               strides=strides,
                               padding='same',
                               use_bias=False,
                               kernel_initializer='he_normal',
                               kernel_regularizer=l2_reg,
                               name=name + '_conv1')(x)
    x = layers.BatchNormalization(epsilon=bn_eps,
                                  gamma_regularizer=l2_reg,
                                  name=name + '_bn1')(x)
    x = layers.ReLU(name=name + '_relu2')(x)
    x = layers.SeparableConv2D(num_filters,
                               kernel_size,
                               padding='same',
                               use_bias=False,
                               kernel_initializer='he_normal',
                               kernel_regularizer=l2_reg,
                               name=name + '_conv2')(x)
    x = layers.BatchNormalization(epsilon=bn_eps,
                                  gamma_regularizer=l2_reg,
                                  name=name + '_bn2')(x)
    return x
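_sep relies on module-level l2_reg and bn_eps (plus a Tensor import); a minimal sketch of that assumed context, with guessed values:

from tensorflow import Tensor
from tensorflow.keras import layers, regularizers

l2_reg = regularizers.l2(1e-4)  # assumed weight-decay strength
bn_eps = 1e-3                   # assumed BatchNorm epsilon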
Example #11
def residual_block(x, n_filters):
    """ Create a residual block using Depthwise Separable Convolutions
        x        : input into residual block
        n_filters: number of filters
    """
    # Remember the input
    shortcut = x

    # First Depthwise Separable Convolution
    x = layers.SeparableConv2D(n_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Second Depthwise Separable Convolution
    x = layers.SeparableConv2D(n_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Third Depthwise Separable Convolution
    x = layers.SeparableConv2D(n_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Add the identity link to the output of the block (assumes the input
    # already has n_filters channels)
    x = layers.add([x, shortcut])
    return x
Example #12
def exitFlow(x, n_classes):
    """ Create the exit flow section
        x         : input to the exit flow section
        n_classes : number of output classes
    """
    def classifier(x, n_classes):
        """ The output classifier
            x         : input to the classifier
            n_classes : number of output classes
        """
        # Global Average Pooling will flatten the 10x10 feature maps into 1D
        # feature maps
        x = layers.GlobalAveragePooling2D()(x)

        # Fully connected output layer (classification)
        x = layers.Dense(n_classes, activation='softmax')(x)
        return x

    # Remember the input
    shortcut = x

    # Strided projection convolution widens the identity link to 1024 filters
    # and halves its spatial size to match the block output (projection shortcut)
    shortcut = layers.Conv2D(1024, (1, 1), strides=(2, 2),
                             padding='same')(shortcut)
    shortcut = layers.BatchNormalization()(shortcut)

    # First Depthwise Separable Convolution
    # Dimensionality reduction - reduce number of filters
    x = layers.SeparableConv2D(728, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)

    # Second Depthwise Separable Convolution
    # Dimensionality restoration
    x = layers.SeparableConv2D(1024, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create pooled feature maps, reduce size by 75%
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add the projection shortcut to the output of the pooling layer
    x = layers.add([x, shortcut])

    # Third Depthwise Separable Convolution
    x = layers.SeparableConv2D(1536, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Fourth Depthwise Separable Convolution
    x = layers.SeparableConv2D(2048, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create classifier section
    x = classifier(x, n_classes)

    return x
Example #13
    def __init__(self,
                 width,
                 depth,
                 num_anchors=9,
                 separable_conv=True,
                 freeze_bn=False,
                 **kwargs):
        super(BoxNet, self).__init__(**kwargs)
        self.width = width
        self.depth = depth
        self.num_anchors = num_anchors
        self.separable_conv = separable_conv
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'bias_initializer': 'zeros',
        }
        if separable_conv:
            kernel_initializer = {
                'depthwise_initializer': initializers.VarianceScaling(),
                'pointwise_initializer': initializers.VarianceScaling(),
            }
            options.update(kernel_initializer)
            self.convs = [
                layers.SeparableConv2D(filters=width,
                                       name=f'{self.name}/box-{i}',
                                       **options) for i in range(depth)
            ]
            self.head = layers.SeparableConv2D(filters=num_anchors * 4,
                                               name=f'{self.name}/box-predict',
                                               **options)
        else:
            kernel_initializer = {
                'kernel_initializer':
                initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
            }
            options.update(kernel_initializer)
            self.convs = [
                layers.Conv2D(filters=width,
                              name=f'{self.name}/box-{i}',
                              **options) for i in range(depth)
            ]
            self.head = layers.Conv2D(filters=num_anchors * 4,
                                      name=f'{self.name}/box-predict',
                                      **options)
        self.bns = [[
            layers.BatchNormalization(momentum=MOMENTUM,
                                      epsilon=EPSILON,
                                      name=f'{self.name}/box-{i}-bn-{j}')
            for j in range(3, 8)
        ] for i in range(depth)]
        # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
        #             for i in range(depth)]
        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, 4))
        self.level = 0
Example #14
def make_model(input_shape, num_classes):
    '''
    Creates a CNN model; the architecture can be inspected by calling
    model.summary().

    Parts of this model were changed to accept an input that is BGR.
    '''
    inputs = keras.Input(shape=input_shape)
    # Image augmentation block
    x = data_augmentation(inputs)

    # Entry block
    x = layers.experimental.preprocessing.Rescaling(1. / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(size, 1, strides=2,
                                 padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)

    #if num_classes == 2:
    #    activation = "sigmoid"
    #    units = 1
    #else:
    #    activation = "softmax"
    #    units = num_classes
    activation = "softmax"
    units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs), x  # note: also returns the tensor feeding the final Dense layer
Example #15
def create_xception(input_shape=(48, 48, 1), num_classes=7):
    inputs = keras.Input(shape=input_shape)

    # Image augmentation block
    x = data_augmentation(inputs)

    # Entry block
    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(size, 1, strides=2,
                                 padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)

    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=keras.losses.SparseCategoricalCrossentropy(),
        metrics=['acc'],
    )
    return model
Example #16
def conv_block(filters, inputs):
    x = layers.SeparableConv2D(filters, 3, activation="relu",
                               padding="same")(inputs)
    x = layers.SeparableConv2D(filters, 3, activation="relu",
                               padding="same")(x)
    x = layers.BatchNormalization()(x)
    outputs = layers.MaxPool2D()(x)

    return outputs
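A sketch of chaining these blocks into an encoder; the input shape and filter progression are assumptions:

inputs = keras.Input(shape=(224, 224, 3))  # assumed input size
x = conv_block(32, inputs)  # -> (112, 112, 32)
x = conv_block(64, x)       # -> (56, 56, 64)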
Example #17
def make_model(input_shape, num_classes):
    """
	Creates a model based on a RNN architecture.
	-----
	:param <input_shape>: <class 'tuple'> ; image size in pixels
	:param <num_classes>: <class 'int'> ; batch size
	"""
    inputs = keras.Input(shape=input_shape)

    # Entry block
    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(inputs)
    x = Conv2D(32, kernel_size=(3, 3), strides=2, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(64, kernel_size=(3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [32, 64, 128, 256]:

        x = layers.SeparableConv2D(size, kernel_size=(3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = layers.SeparableConv2D(size, kernel_size=(3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = MaxPooling2D((3, 3), strides=2, padding="same")(x)

        # Project residual
        residual = Conv2D(size, kernel_size=(1, 1), strides=2,
                          padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(128, kernel_size=(3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs)
Example #18
def DoubleConv2D(input_tensor, n_filters, ksize=3):
    u = L.SeparableConv2D(filters=n_filters,
                          kernel_size=ksize,
                          padding='same')(input_tensor)
    u = L.BatchNormalization()(u)
    u = L.Activation('relu')(u)
    u = L.SeparableConv2D(filters=n_filters,
                          kernel_size=ksize,
                          padding='same')(u)
    u = L.BatchNormalization()(u)
    u = L.Activation('relu')(u)
    return u
Example #19
    def __init__(self, channels_in, channels_out, downsample=False):
        """
        Create new convolution block.

        Args:
            channels_in: The number of input channels.
            channels_out: The number of output channels.
            downsample: If True, downsample the input spatially by a factor of 2.
        """
        super().__init__()
        input_shape = (None, None, channels_in)

        self.block = keras.Sequential()
        if downsample:
            self.block.add(SymmetricPadding(1))
            self.block.add(
                layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
            self.block.add(SymmetricPadding(1))
            self.block.add(
                layers.SeparableConv2D(channels_out,
                                       3,
                                       padding="valid",
                                       input_shape=input_shape))
        else:
            self.block.add(SymmetricPadding(1))
            self.block.add(
                layers.SeparableConv2D(channels_out,
                                       3,
                                       padding="valid",
                                       input_shape=input_shape))
        self.block.add(layers.BatchNormalization())
        self.block.add(layers.ReLU())
        self.block.add(SymmetricPadding(1))
        self.block.add(
            layers.SeparableConv2D(channels_out,
                                   3,
                                   padding="valid",
                                   input_shape=input_shape))
        self.block.add(layers.BatchNormalization())
        self.block.add(layers.ReLU())

        if downsample:
            self.projection = layers.Conv2D(
                channels_out,
                1,
                padding="valid",
                input_shape=input_shape,
                strides=(2, 2),
            )
        else:
            self.projection = layers.Conv2D(channels_out,
                                            1,
                                            padding="valid",
                                            input_shape=input_shape)
Example #20
def middle_flow(input, name="middle_flow"):
    x = layers.ReLU(name=name+"_Act_1")(input)
    x = layers.SeparableConv2D(728, 3, padding='same', name=name+"_Separable_1")(x)
    x = layers.BatchNormalization(name=name+"_BN_1")(x)
    x = layers.ReLU(name=name+"_Act_2")(x)
    x = layers.SeparableConv2D(728, 3, padding='same', name=name+"_Separable_2")(x)
    x = layers.BatchNormalization(name=name+"_BN_2")(x)
    x = layers.ReLU(name=name+"_Act_3")(x)
    x = layers.SeparableConv2D(728, 3, padding='same', name=name+"_Separable_3")(x)
    x = layers.BatchNormalization(name=name+"_BN_3")(x)
    x = layers.Add(name=name+"_Add")([input, x])
    return x
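In Xception the middle flow stacks this residual block 8 times; a sketch, assuming entry_output is the 728-channel entry-flow output:

x = entry_output  # assumed 728-channel feature map from the entry flow
for i in range(8):
    x = middle_flow(x, name=f"middle_flow_{i}")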
Example #21
def make_model(input_shape, num_classes):
    inputs = keras.Input(shape=input_shape)
    # Image augmentation block
    # x = data_augmentation(inputs)

    # we'll be passing augmented data

    # Entry block
    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(inputs)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(size, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs)
Example #22
def make_model(input_shape, num_classes):
    data_augmentation = keras.Sequential([
        layers.experimental.preprocessing.RandomFlip("horizontal"),
        layers.experimental.preprocessing.RandomRotation(0.1),
    ])
    inputs = keras.Input(shape=input_shape)
    # Image augmentation block
    x = data_augmentation(inputs)

    # Entry block
    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Loop over the block widths instead of writing each block out by hand
    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(size, 1, strides=2,
                                 padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    # Using softmax, since this is a multi-class problem
    x = layers.GlobalAveragePooling2D()(x)
    activation = "softmax"
    units = num_classes

    # add dropouts to prevent overfitting
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs)
Example #23
def make_model(input_shape):
    inputs = keras.Input(shape=input_shape)
    x = data_augmentation(inputs)

    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation(tf.nn.relu)(x)

    x = layers.Conv2D(64, 3, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation(tf.nn.relu)(x)

    previous_block_activation = x

    #for size in [128, 256, 512, 728]:
    for size in [32, 64, 128, 256]:
        x = layers.Activation(tf.nn.relu)(x)
        x = layers.SeparableConv2D(size, 3, padding='same')(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation(tf.nn.relu)(x)
        x = layers.SeparableConv2D(size, 3, padding='same')(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=1, padding='same')(x)

        residual = layers.Conv2D(size, 3, strides=1,
                                 padding='same')(previous_block_activation)
        x = layers.add([x, residual])
        previous_block_activation = x

    x = layers.SeparableConv2D(1024, 3, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation(tf.nn.relu)(x)
    ###added layer
    x = layers.Activation(tf.nn.relu)(x)
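    # note: 'size' below is the leftover loop variable (256, from the last iteration)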
    x = layers.SeparableConv2D(size, 3, padding='same')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Activation(tf.nn.relu)(x)
    x = layers.SeparableConv2D(size, 3, padding='same')(x)
    x = layers.BatchNormalization()(x)

    x = layers.MaxPooling2D(3, strides=2, padding='same')(x)
    ###
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(0.1)(x)

    outputs = layers.Dense(3, activation='softmax')(x)
    return keras.Model(inputs, outputs)
Example #24
def make_model_keras(input_shape, num_classes):
    """
    This function define the DNN Model based on the Keras example.
    :param input_shape: The requested size of the image
    :param num_classes: In this classification problem, there are two classes: 1) Fire and 2) Non_Fire.
    :return: The built model is returned
    """
    inputs = keras.Input(shape=input_shape)
    # x = data_augmentation(inputs)  # 1) First option
    x = inputs  # 2) Second option

    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
    # x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.Conv2D(8, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x

    # for size in [128, 256, 512, 728]:
    for size in [8]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        residual = layers.Conv2D(size, 1, strides=2,
                                 padding="same")(previous_block_activation)

        x = layers.add([x, residual])
        previous_block_activation = x
    x = layers.SeparableConv2D(8, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs, name="model_fire")
Example #25
def build_model(img_height, img_width):
    inputs = Input(shape=(img_height, img_width, 3))

    x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(size, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    activation = "sigmoid"
    units = 1

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.summary()

    model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(training_ds, epochs=10, batch_size=32, validation_data=testing_ds)
    save_model(model, "./best_model")
    return model
Example #26
def make_model(input_shape, num_classes):
    inputs = keras.Input(shape=input_shape)

    x = layers.Conv2D(filters=32, kernel_size=3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(filters=64, kernel_size=3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters=size, kernel_size=3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters=size, kernel_size=3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(pool_size=3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(filters=size, kernel_size=1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(filters=1024, kernel_size=3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.GlobalAveragePooling2D()(x)

    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)

    model = keras.Model(inputs=inputs, outputs=outputs)

    return model
Example #27
def sep_conv(x, num_filters, kernel_size=(3, 3), activation='relu'):
    if activation == 'selu':
        x = layers.SeparableConv2D(num_filters, kernel_size,
                                   activation='selu',
                                   padding='same',
                                   kernel_initializer='lecun_normal')(x)
    elif activation == 'relu':
        x = layers.SeparableConv2D(num_filters, kernel_size,
                                   padding='same',
                                   use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
    else:
        raise ValueError('Unknown activation function: %s' % (activation,))
    return x
Example #28
def make_model(input_shape,num_classes):
  inputs=keras.Input(shape=input_shape)
  x=data_augmen(inputs)
  x=layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
  x=layers.Conv2D(32,3,strides=2,padding="same")(x)
  x=layers.BatchNormalization()(x)
  x=layers.Activation("relu")(x)

  x=layers.Conv2D(64,3,padding="same")(x)
  x=layers.BatchNormalization()(x)
  x=layers.Activation("relu")(x)

  prev_blk_residual=x

  for size in [128, 256, 512, 728]:

      x=layers.Activation("relu")(x)
      x=layers.SeparableConv2D(size,3,padding="same")(x)
      x=layers.BatchNormalization()(x)

      x=layers.Activation("relu")(x)
      x=layers.SeparableConv2D(size,3,padding="same")(x)
      x=layers.BatchNormalization()(x)

      x=layers.MaxPooling2D(3,strides=2,padding="same")(x)


      res=layers.Conv2D(size,3,strides=2,padding="same")(prev_blk_residual)

      x=layers.add([x,res])
      prev_blk_residual=x

  x=layers.SeparableConv2D(1024,3,padding="same")(x)
  x=layers.BatchNormalization()(x)
  x=layers.Activation("relu")(x)

  x=layers.GlobalAveragePooling2D()(x)

  if num_classes == 2:
      activation="sigmoid"
      units=1
  else:
      activation="softmax"
      units=num_classes

  x=layers.Dropout(0.5)(x)
  outputs=layers.Dense(units,activation=activation)(x)
  return keras.Model(inputs,outputs)
Example #29
def block_up(input,
             conc,
             filters,
             drop=0.3,
             w_decay=0.0001,
             kernel_size=3,
             separable=False):
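    # note: w_decay is accepted but unused in this block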
    x = layers.Conv2DTranspose(
        filters,
        (2, 2),
        strides=(2, 2),
        padding="same",
    )(input)
    for i in range(len(conc)):
        x = layers.concatenate([x, conc[i]])
    if separable:
        x = layers.SeparableConv2D(
            filters,
            (kernel_size, kernel_size),
            kernel_initializer="he_normal",
            padding="same",
            activation="elu",
        )(x)
        x = layers.SeparableConv2D(
            filters,
            (kernel_size, kernel_size),
            kernel_initializer="he_normal",
            padding="same",
            activation="elu",
        )(x)
    else:
        x = layers.Conv2D(
            filters,
            (kernel_size, kernel_size),
            kernel_initializer="he_normal",
            padding="same",
            activation="elu",
        )(x)
        x = layers.Conv2D(
            filters,
            (kernel_size, kernel_size),
            kernel_initializer="he_normal",
            padding="same",
            activation="elu",
        )(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(drop)(x)
    return x
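A hypothetical decoder step; x and encoder_skip stand in for tensors produced earlier in a U-Net-style model:

x = block_up(x, conc=[encoder_skip], filters=128, separable=True)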
Example #30
def SeparableConvBlock(num_channels,
                       kernel_size,
                       strides,
                       name,
                       freeze_bn=False):
    """
    Builds a small block consisting of a depthwise separable convolution layer and a batch norm layer
    Args:
        num_channels: Number of channels used in the BiFPN
        kernel_size: Kernel size of the depthwise separable convolution layer
        strides: Stride of the depthwise separable convolution layer
        name: Name of the block
        freeze_bn: Boolean indicating whether the batch norm layers should be frozen during training.

    Returns:
       The depthwise separable convolution block
    """
    f1 = layers.SeparableConv2D(num_channels,
                                kernel_size=kernel_size,
                                strides=strides,
                                padding='same',
                                use_bias=True,
                                name=f'{name}/conv')
    f2 = BatchNormalization(freeze=freeze_bn,
                            momentum=MOMENTUM,
                            epsilon=EPSILON,
                            name=f'{name}/bn')
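    # Compose the two layers into a single callable: x -> f2(f1(x))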
    return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)),
                  (f1, f2))
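SeparableConvBlock leans on module-level constants, functools.reduce, and a custom freezable BatchNormalization (as in common EfficientDet implementations); a sketch of that assumed context:

from functools import reduce
from tensorflow.keras import layers

MOMENTUM = 0.997  # assumed batch-norm momentum
EPSILON = 1e-4    # assumed batch-norm epsilon

class BatchNormalization(layers.BatchNormalization):
    """Sketch: batch norm whose statistics can be frozen during training."""
    def __init__(self, freeze=False, **kwargs):
        super().__init__(**kwargs)
        self.freeze = freeze

    def call(self, inputs, training=None):
        # When frozen, always run in inference mode (use moving averages).
        return super().call(inputs,
                            training=(False if self.freeze else training))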