def cnn_v1_separable_conv():
    input_tensor = Input(shape=(IMG_SIZE, IMG_SIZE, 3), name='4d_input')

    x = layers.SeparableConv2D(32, (3, 3), padding='same',
                               activation='relu')(input_tensor)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.2)(x)

    x = layers.SeparableConv2D(64, (3, 3), padding='same',
                               activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.3)(x)

    x = layers.SeparableConv2D(128, (3, 3), padding='same',
                               activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.4)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.5)(x)
    output_tensor = layers.Dense(NUM_CLASSES, activation='softmax')(x)
    model = Model(input_tensor, output_tensor)
    return model
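
A minimal usage sketch for the block above, assuming `Input`, `layers`, and `Model` come from `tensorflow.keras` as elsewhere on this page; the constant values are placeholders, since `IMG_SIZE` and `NUM_CLASSES` live at module level in the source:

# Hypothetical values for the module-level constants the function reads.
IMG_SIZE = 128
NUM_CLASSES = 10

model = cnn_v1_separable_conv()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # pairs with the softmax head
              metrics=['accuracy'])
model.summary()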
Example #2
    def __init__(self, width, depth, num_classes=20, num_anchors=9, freeze_bn=False, name='class_net', **kwargs):
        self.name = name
        self.width = width
        self.depth = depth
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }

        self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros',
                                             name=f'{self.name}/class-{i}', **options)
                      for i in range(depth)]
        self.head = layers.SeparableConv2D(filters=num_classes * num_anchors,
                                            bias_initializer=PriorProbability(probability=0.01),
                                            name=f'{self.name}/class-predict', **options)

        self.bns = [
            [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}') for j
             in range(3, 8)]
            for i in range(depth)]

        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))  # named "relu" but applies swish
        self.reshape = layers.Reshape((-1, num_classes))
        self.activation = layers.Activation('sigmoid')
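
The excerpt stops at `__init__`; what follows is a hedged sketch of the forward pass these layers imply, mirroring the per-level loop used in Example #26 further down. `features` would be the list of FPN outputs, one per pyramid level:

    def call(self, features, **kwargs):
        # Sketch only, not part of the original excerpt.
        outputs = []
        for i, feature in enumerate(features):      # i indexes the FPN level
            for j in range(self.depth):
                feature = self.convs[j](feature)
                feature = self.bns[j][i](feature)   # separate BN per (layer, level)
                feature = self.relu(feature)
            feature = self.head(feature)
            feature = self.reshape(feature)         # -> (batch, boxes, num_classes)
            outputs.append(self.activation(feature))
        return outputs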
Example #3
    def create_model(self, space: Optional[Dict[str, Any]] = None) -> Model:
        if space:
            print('Using hyperopt space:')
            print(space)

        # for_optimization = True if space else False

        img_input = Input(shape=(1384, 865, 3), dtype='float32')
        x = layers.SeparableConv2D(256,
                                   20,
                                   strides=(10, 10),
                                   activation='relu')(img_input)
        x = layers.SeparableConv2D(512, 7, strides=(2, 2),
                                   activation='relu')(x)
        x = layers.SeparableConv2D(1024, 7, strides=(2, 2),
                                   activation='relu')(x)
        x = layers.SeparableConv2D(1024, 7, strides=(2, 2),
                                   activation='relu')(x)
        x = layers.Flatten()(x)
        x = layers.Dense(128, activation='relu')(x)
        rot_y_pred = layers.Dense(1, name='rot_y')(x)

        model = Model(img_input, rot_y_pred)
        model.compile(optimizer='adam', loss='mse', metrics=['mae'])
        return model
Example #4
    def __init__(self, width, depth, num_anchors=9, name='box_net', **kwargs):
        self.name = name
        self.width = width
        self.depth = depth
        self.num_anchors = num_anchors
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'bias_initializer': 'zeros',
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }

        self.convs = [
            layers.SeparableConv2D(filters=width,
                                   name=f'{self.name}/box-{i}',
                                   **options) for i in range(depth)
        ]
        self.head = layers.SeparableConv2D(filters=num_anchors * 4,
                                           name=f'{self.name}/box-predict',
                                           **options)

        self.bns = [[
            layers.BatchNormalization(momentum=MOMENTUM,
                                      epsilon=EPSILON,
                                      name=f'{self.name}/box-{i}-bn-{j}')
            for j in range(3, 8)
        ] for i in range(depth)]

        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))  # named "relu" but applies swish
        self.reshape = layers.Reshape((-1, 4))
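
As with the class head above, the excerpt stops at `__init__`; a hedged sketch of the implied forward pass:

    def call(self, features, **kwargs):
        # Sketch only, not part of the original excerpt.
        outputs = []
        for i, feature in enumerate(features):
            for j in range(self.depth):
                feature = self.convs[j](feature)
                feature = self.bns[j][i](feature)
                feature = self.relu(feature)
            feature = self.head(feature)            # num_anchors * 4 channels
            outputs.append(self.reshape(feature))   # -> (batch, boxes, 4)
        return outputs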
Example #5
def residual_block_entry(x, nb_filters):
    """ Create a residual block using Depthwise Separable Convolutions
        x         : input into residual block
        nb_filters: number of filters
    """
    shortcut = x

    # First Depthwise Separable Convolution
    x = layers.SeparableConv2D(nb_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Second depthwise Separable Convolution
    x = layers.SeparableConv2D(nb_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create pooled feature maps, reduce size by 75%
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # 1x1 strided convolution on the identity link projects it to the same
    # number of filters and spatial size as the residual branch output
    shortcut = layers.Conv2D(nb_filters, (1, 1),
                             strides=(2, 2),
                             padding='same')(shortcut)
    shortcut = layers.BatchNormalization()(shortcut)

    x = layers.add([x, shortcut])

    return x
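
A hedged sketch of stacking the block as an Xception-style entry flow would, with an illustrative stem and filter progression:

from tensorflow.keras import layers, Input, Model

inputs = Input(shape=(224, 224, 3))
x = layers.Conv2D(32, (3, 3), strides=(2, 2), padding='same')(inputs)  # illustrative stem
for nb_filters in [128, 256, 728]:
    x = residual_block_entry(x, nb_filters)
model = Model(inputs, x)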
Example #6
    def create_model(self, space: Optional[Dict[str, Any]] = None) -> Model:
        if space:
            print('Using hyperopt space:')
            print(space)

        img_input = Input(shape=(640, 400, 3), dtype='float32')
        x = layers.SeparableConv2D(32, 5, activation='relu')(img_input)
        x = layers.MaxPooling2D(2)(x)
        # x = inception_test_module(img_input, 32)

        x = layers.SeparableConv2D(64, 5, activation='relu')(x)
        x = layers.MaxPooling2D(2)(x)
        # x = inception_test_module(x, 64)

        x = layers.SeparableConv2D(128, 5, activation='relu')(x)
        x = layers.MaxPooling2D(2)(x)
        # x = inception_test_module(x, 128)

        # x = layers.SeparableConv2D(128, 5, activation='relu')(x)
        # x = layers.MaxPooling2D(2)(x)
        x = inception_test_module(x, 128)

        # x = layers.SeparableConv2D(128, 5, activation='relu')(x)
        # x = layers.MaxPooling2D(2)(x)
        x = inception_test_module(x, 128)

        x = layers.Flatten()(x)
        x = layers.Dense(512, activation='relu')(x)
        rot_x_pred = layers.Dense(1, name='rot_x')(x)

        model = Model(img_input, rot_x_pred)
        model.compile(optimizer='rmsprop', loss='mae')
        return model
Example #7
def identity_block(input_tensor,
                   kernel_size,
                   filters,
                   stage,
                   block,
                   bn_axis=3):
    filters1, filters2, filters3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # SeparableConv2D has no single kernel; initialize the depthwise and
    # pointwise kernels explicitly (a plain kernel_initializer argument
    # would be silently ignored by this layer).
    x = layers.SeparableConv2D(filters1, (1, 1),
                               depthwise_initializer='he_normal',
                               pointwise_initializer='he_normal',
                               name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.SeparableConv2D(filters2,
                               kernel_size,
                               padding='same',
                               depthwise_initializer='he_normal',
                               pointwise_initializer='he_normal',
                               name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.SeparableConv2D(filters3, (1, 1),
                               depthwise_initializer='he_normal',
                               pointwise_initializer='he_normal',
                               name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
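
Because the block adds `input_tensor` back unchanged, `filters3` must equal the incoming channel count; a hedged usage sketch:

from tensorflow.keras import layers, Input, Model

inputs = Input(shape=(56, 56, 256))
# The last entry of filters (256) matches the input channels for the add.
x = identity_block(inputs, (3, 3), [64, 64, 256], stage=2, block='b')
x = identity_block(x, (3, 3), [64, 64, 256], stage=2, block='c')
model = Model(inputs, x)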
Example #8
def _separable_conv_block(ip,
                          filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          block_id=None):
    '''Adds 2 blocks of [relu-separable conv-batchnorm].

    # Arguments
        ip: Input tensor
        filters: Number of output filters per layer
        kernel_size: Kernel size of separable convolutions
        strides: Strided convolution for downsampling
        block_id: String block_id

    # Returns
        A Keras tensor
    '''
    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1

    with backend.name_scope('separable_conv_block_%s' % block_id):
        x = layers.Activation('relu')(ip)
        if strides == (2, 2):
            x = layers.ZeroPadding2D(
                padding=correct_pad(backend, x, kernel_size),
                name='separable_conv_1_pad_%s' % block_id)(x)
            conv_pad = 'valid'
        else:
            conv_pad = 'same'
        x = layers.SeparableConv2D(filters,
                                   kernel_size,
                                   strides=strides,
                                   name='separable_conv_1_%s' % block_id,
                                   padding=conv_pad,
                                   use_bias=False,
                                   depthwise_regularizer=l2(weight_decay),
                                   pointwise_regularizer=l2(weight_decay),
                                   kernel_initializer='he_normal')(x)
        if use_bn:
            x = layers.BatchNormalization(axis=channel_dim,
                                          momentum=bn_momentum,
                                          epsilon=1e-3,
                                          name='separable_conv_1_bn_%s' %
                                          (block_id))(x)
        x = layers.Activation('relu')(x)
        x = layers.SeparableConv2D(filters,
                                   kernel_size,
                                   name='separable_conv_2_%s' % block_id,
                                   padding='same',
                                   use_bias=False,
                                   depthwise_regularizer=l2(weight_decay),
                                   pointwise_regularizer=l2(weight_decay),
                                   kernel_initializer='he_normal')(x)
        if use_bn:
            x = layers.BatchNormalization(axis=channel_dim,
                                          momentum=bn_momentum,
                                          epsilon=1e-3,
                                          name='separable_conv_2_bn_%s' %
                                          (block_id))(x)
    return x
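
The block reads `weight_decay`, `use_bn`, `bn_momentum`, and `correct_pad` from module scope; a hedged sketch of the definitions it expects (values are illustrative, and `correct_pad` is modelled on the keras-applications helper of the same name):

from tensorflow.keras import backend, layers
from tensorflow.keras.regularizers import l2

weight_decay = 5e-5      # illustrative
use_bn = True
bn_momentum = 0.9997

def correct_pad(backend, inputs, kernel_size):
    # Same contract as keras_applications.correct_pad: zero-padding tuple
    # for a stride-2 convolution with 'valid' padding.
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]))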
Example #9
def get_model(img_size, num_classes):
    inputs = keras.Input(shape=img_size + (3,))  # e.g. img_size=(160, 160) -> (160, 160, 3)

    ### [First half of the network: downsampling inputs] ###

    # Entry block
    x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Blocks 1, 2, 3, 4 are identical apart from the feature depth.
    for filters in [64, 128, 256, 512]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2,
                                 padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###

    for filters in [512, 256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.UpSampling2D(2)(x)

        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes,
                            3,
                            activation="softmax",
                            padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model
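
Per the inline comment, `img_size` is a 2-tuple; a minimal sketch of building and compiling the segmentation model, with a sparse categorical loss as one common pairing for integer mask labels:

model = get_model(img_size=(160, 160), num_classes=3)  # e.g. 3 mask classes
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy')
model.summary()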
Example #10
def ExitFlow(layer_input):

    layer_skip = layers.Conv2D(filters=512,
                               kernel_size=1,
                               strides=2,
                               padding="same",
                               activation="relu",
                               kernel_initializer="he_normal")(layer_input)

    layer1_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer_input)
    layer1_BN = layers.BatchNormalization()(layer1_depth)
    layer1_separa = layers.SeparableConv2D(
        filters=256,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_BN)

    layer2_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_separa)
    layer2_BN = layers.BatchNormalization()(layer2_depth)
    layer2_separa = layers.SeparableConv2D(
        filters=512,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_BN)

    layer3_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_separa)
    layer3_BN = layers.BatchNormalization()(layer3_depth)
    layer3_separa = layers.SeparableConv2D(
        filters=512,
        kernel_size=1,
        strides=2,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer3_BN)

    layer_process = layers.add([layer3_separa, layer_skip])

    layer1 = MiddleFlow_unit(layer_process, 1024)
    layer2 = MiddleFlow_unit(layer1, 1024)
    layer_out = MiddleFlow_unit(layer2, 2048)

    return layer_out
Example #11
def EntryFlow(layer_input, layer_skip, filters):

    # Note: the incoming layer_skip argument is immediately recomputed from
    # layer_input; only the returned skip tensor is used downstream.
    layer_skip = layers.Conv2D(filters,
                               kernel_size=1,
                               strides=2,
                               padding="same",
                               activation="relu",
                               kernel_initializer="he_normal")(layer_input)
    # one separable_depthwise
    layer1_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer_input)
    layer1_BN = layers.BatchNormalization()(layer1_depth)
    layer1_separa = layers.SeparableConv2D(
        filters,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_BN)

    layer2_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_separa)
    layer2_BN = layers.BatchNormalization()(layer2_depth)
    layer2_separa = layers.SeparableConv2D(
        filters,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_BN)

    layer3_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_separa)
    layer3_BN = layers.BatchNormalization()(layer3_depth)
    layer3_separa = layers.SeparableConv2D(
        filters,
        kernel_size=3,
        strides=2,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer3_BN)

    print(layer3_separa.shape, layer_skip.shape)  # debug: both branches must match for the add
    block_out = layers.add([layer_skip, layer3_separa])

    return block_out, layer_skip
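
A hedged sketch of chaining the block; each call halves the spatial size, and the returned skip tensor is threaded into the next call:

from tensorflow.keras import layers, Input

inputs = Input(shape=(299, 299, 3))
x = layers.Conv2D(32, 3, strides=2, padding="same", activation="relu")(inputs)  # illustrative stem
skip = x
for filters in [128, 256, 728]:
    x, skip = EntryFlow(x, skip, filters)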
Example #12
def exitFlow(x):
    """ Create the exit flow section
        x : input tensor into section
    """
    def classifier(x):
        """ The output classifier
            x : input tensor
        """
        # Global Average Pooling will flatten the 10x10 feature maps into 1D
        # feature maps
        x = layers.GlobalAveragePooling2D()(x)
        # Fully connected output layer (classification)
        x = layers.Dense(1000)(x)
        return x

    shortcut = x

    # First Depthwise Separable Convolution
    x = layers.SeparableConv2D(728, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)

    # Second Depthwise Separable Convolution
    x = layers.SeparableConv2D(1024, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create pooled feature maps, reduce size by 75%
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # 1x1 strided convolution on the identity link projects it to 1024 filters
    # and halves its spatial size to match the residual branch for the add
    shortcut = layers.Conv2D(1024, (1, 1), strides=(2, 2),
                             padding='same')(shortcut)
    shortcut = layers.BatchNormalization()(shortcut)

    x = layers.add([x, shortcut])

    # Third Depthwise Separable Convolution
    x = layers.SeparableConv2D(1556, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Fourth Depthwise Separable Convolution
    x = layers.SeparableConv2D(2048, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create classifier section
    x = classifier(x)

    return x
Example #13
def big_XCEPTION(input_shape, num_classes):
    img_input = layers.Input(input_shape)
    x = layers.Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
    x = layers.BatchNormalization(name='block1_conv1_bn')(x)
    x = layers.Activation('relu', name='block1_conv1_act')(x)
    x = layers.Conv2D(64, (3, 3), use_bias=False)(x)
    x = layers.BatchNormalization(name='block1_conv2_bn')(x)
    x = layers.Activation('relu', name='block1_conv2_act')(x)

    residual = layers.Conv2D(128, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = layers.BatchNormalization(name='block2_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block2_sepconv2_act')(x)
    x = layers.SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = layers.BatchNormalization(name='block2_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(256, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block3_sepconv1_act')(x)
    x = layers.SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = layers.BatchNormalization(name='block3_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block3_sepconv2_act')(x)
    x = layers.SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = layers.BatchNormalization(name='block3_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    x = layers.Conv2D(
        num_classes,
        (3, 3),
        # kernel_regularizer=regularization,
        padding='same')(x)
    x = layers.GlobalAveragePooling2D()(x)
    output = layers.Activation('softmax', name='predictions')(x)

    model = models.Model(img_input, output)
    return model
Example #14
def build_model():
    model = models.Sequential()
    model.add(layers.SeparableConv2D(32, (3, 3), activation='relu',
                                     input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.SeparableConv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.SeparableConv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.SeparableConv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))
    return model
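
The head is a 4-way softmax, so a categorical loss is the natural compile choice; a hedged sketch, assuming the `models`/`layers` imports used above:

model = build_model()
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',  # 4-class softmax output
              metrics=['acc'])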
Example #15
def basic_block(y, K, args, ishape=0, residual=0, tlist=None):

    # Avoid the shared-mutable-default pitfall for the accumulator list.
    if tlist is None:
        tlist = []

    if residual:
        x = y

    # Per-conv strides: all 1, except the last conv is strided when residual.
    strides_list = np.ones(args.autonconv)
    if residual:
        strides_list[args.autonconv - 1] = 2
    strides_list = np.int32(strides_list)

    for i in range(args.autonconv):
        if args.autocdwise:
            y = layers.SeparableConv2D(K,
                                       kernel_size=(3, 3),
                                       strides=(strides_list[i], strides_list[i]),
                                       padding='same')(y)
        else:
            y = layers.Conv2D(K,
                              kernel_size=(3, 3),
                              strides=(strides_list[i], strides_list[i]),
                              padding='same')(y)

        if not args.autonobn:
            y = layers.BatchNormalization()(y)
            if args.da_gauss != 0.0:
                # Noise stddev is hardcoded to 0.3 even though args.da_gauss
                # gates whether it is applied.
                y = layers.GaussianNoise(0.3)(y)

        if (residual == 0) or (i < args.autonconv - 1):
            y = layers.ReLU()(y)
            tlist.append(y)

    if residual:
        if args.autocdwise:
            x = layers.SeparableConv2D(K,
                                       kernel_size=(1, 1),
                                       strides=(2, 2),
                                       padding='same')(x)
        else:
            x = layers.Conv2D(K,
                              kernel_size=(1, 1),
                              strides=(2, 2),
                              padding='same')(x)
        y = layers.add([x, y])
        y = layers.ReLU()(y)
        tlist.append(y)
    else:
        y = layers.MaxPooling2D(pool_size=(2, 2))(y)
    return y
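
The block pulls its configuration from an `args` object; a hedged sketch of the attributes it reads, with `argparse.Namespace` as a stand-in:

from argparse import Namespace
from tensorflow.keras import layers, Input
import numpy as np

args = Namespace(autonconv=2,      # convolutions per block
                 autocdwise=True,  # True -> SeparableConv2D, False -> Conv2D
                 autonobn=False,   # False -> apply BatchNormalization
                 da_gauss=0.0)     # non-zero -> GaussianNoise after BN

taps = []                          # collects intermediate activations
y = basic_block(Input(shape=(32, 32, 3)), 64, args, residual=1, tlist=taps)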
Example #16
def gen_model():
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu',
                            input_shape=(96, 96, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.SeparableConv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.SeparableConv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.SeparableConv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
Example #17
def _dw_conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0

    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1:
            # unlike tensorflow, darknet prefers left and top paddings
            x = layers.ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = layers.SeparableConv2D(
            conv['filter'],
            conv['kernel'],
            strides=conv['stride'],
            padding='valid' if conv['stride'] > 1 else 'same',
            name='conv_' + str(conv['layer_idx']),
            use_bias=not conv['bnorm'])(x)
        if conv['bnorm']:
            x = layers.BatchNormalization(epsilon=0.001,
                                          name='bnorm_' +
                                          str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = layers.LeakyReLU(alpha=0.1,
                                 name='leaky_' + str(conv['layer_idx']))(x)

    return layers.add([skip_connection, x]) if do_skip else x
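
Each entry in `convs` is a dict; a hedged sketch of the expected keys, inferred from the lookups in the block above (darknet-style conv specs):

from tensorflow.keras import layers, Input

# Assumed schema: filter, kernel, stride, bnorm, leaky, layer_idx.
convs = [
    {'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
    {'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
    {'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
]
x = Input(shape=(416, 416, 3))
x = _dw_conv_block(x, convs, do_skip=False)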
Example #18
def GenerateModel(n_filter=32,
                  number_of_class=1,
                  input_shape=(48, 48),
                  number_of_channel=6,
                  activation_last='sigmoid',
                  metrics=['mse', 'acc'],
                  loss='mse',
                  optimizer='adam',
                  dropout=0.5,
                  init='glorot_uniform'):
    init_X = init  #keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None)
    filter_size = n_filter * 4
    model = Sequential()
    shape_default = (input_shape[0], input_shape[1], number_of_channel)
    model.add(
        layers.SeparableConv2D(filters=filter_size,
                               input_shape=shape_default,
                               kernel_size=(1, 1),
                               strides=(1, 1),
                               padding='same',
                               activation='selu',
                               use_bias=True,
                               depthwise_initializer=init_X,
                               pointwise_initializer=init_X))
    model.add(
        layers.Conv2D(filters=filter_size,
                      kernel_size=(3, 3),
                      strides=2,
                      padding='same'))
    model.add(layers.BatchNormalization(scale=True))
    model.add(layers.Activation('relu'))
    for kernel_size in ((1, 3), (3, 1), (1, 1)):
        model.add(
            layers.Conv2D(filters=(filter_size // 2),
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same'))
        model.add(layers.BatchNormalization(scale=True))
        model.add(layers.Activation('relu'))
    #model.add(layers.Dropout(0.2))
    for i in [3, 4]:
        model.add(
            layers.Conv2D(filters=(filter_size // i),
                          kernel_size=(3, 3),
                          strides=1,
                          padding='same'))
        model.add(layers.BatchNormalization(scale=True))
        model.add(layers.Activation('relu'))
        model.add(layers.MaxPooling2D((2, 2), padding='valid'))

    model.add(layers.Dropout(dropout))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(number_of_class, activation=activation_last))
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
Example #19
def WasterNet(input_shape=None, classes=40):
    inputs = layers.Input(shape=input_shape)
    x = layers.ZeroPadding2D(padding=(3, 3))(inputs)

    x = layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1))(x)
    x = layers.BatchNormalization(axis=bn_axis)(x)
    x = return_activation(x, 'HS')
    x = layers.ZeroPadding2D(padding=(1, 1))(x)

    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = _wasnet_block(x, filter=64, strides=(2, 2), nl='RE')

    x = _wasnet_block(x, filter=128, strides=(2, 2), nl='HS')

    x = _wasnet_block(x, filter=256, strides=(2, 2), nl='HS')

    x = _wasnet_block(x, filter=512, strides=(2, 2), nl='HS')

    x = layers.SeparableConv2D(512, (3, 3), padding='same')(x)
    x = layers.BatchNormalization(axis=bn_axis)(x)
    x = return_activation(x, 'HS')

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(classes, activation='softmax')(x)
    model = models.Model(inputs, x)

    # model.summary( )
    return model
Example #20
	def layer(inputs):
		# non-trainable convolutions
		conv = layers.SeparableConv2D(
			filters=dim_capsules[0] * (dim_capsules[1] * dim_capsules[2] + 1),
			kernel_size=kernel_size, strides=strides, padding=padding,
			depth_multiplier=1, depthwise_initializer='ones',
			pointwise_initializer=initializers.Constant(value=1 / (kernel_size * kernel_size)),
			use_bias=False, name=name)
		conv.trainable = False
		return conv(inputs)
Example #21
    def __init__(self, filters, kernel_size,
                 activation, initializer='glorot_uniform',
                 batchnorm=True, use_bias=False, name=None,
                 strides=1, separable=False):
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.initializer = initializer
        self.use_bias = use_bias
        self.strides = strides
        # SELU is self-normalizing, so batch norm is skipped for it.
        if activation != 'selu' and batchnorm:
            self.batchnorm = True
        else:
            self.batchnorm = False
        if activation.lower() == 'selu':
            self.initializer = 'lecun_normal'
        elif activation.lower() == 'linear':
            self.initializer = 'glorot_uniform'
        if separable:
            # Note: this branch bakes the activation into the conv itself,
            # so self.activate would apply it a second time downstream.
            self.conv2d = layers.SeparableConv2D(self.filters, self.kernel_size, padding='same',
                                                 activation=self.activation, use_bias=self.use_bias,
                                                 strides=self.strides, depthwise_initializer=self.initializer,
                                                 pointwise_initializer=self.initializer, name=name)
        else:
            self.conv2d = layers.Conv2D(self.filters, self.kernel_size,
                                        padding='same', activation='linear',
                                        use_bias=self.use_bias, strides=self.strides,
                                        kernel_initializer=self.initializer,
                                        name=name)
        if self.batchnorm:
            self.batch_normalization = layers.BatchNormalization()
        self.activate = layers.Activation(self.activation)
Example #22
def xception(i):
    # [First half of the network: downsampling inputs]

    x = layers.Conv2D(8, 3, strides=2, padding="same")(i)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for filters in [16, 32, 64]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2,
                                 padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # [Second half of the network: upsampling inputs]

    for filters in [96, 64, 32, 16]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.UpSampling2D(2)(x)

        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    o = layers.Conv2D(1, 1, activation='sigmoid')(x)
    return o
Example #23
def a3c_sepconv(x, params):
    """
    Feed forward model used in a3c paper but with seperable convolutions
    :param x: input tensor
    :param params: {dict} hyperparams (sub-selection)
    :return: output tensor
    :raises ValueError: could not find parameter
    """
    x = layers.SeparableConv2D(filters=16,
                               kernel_size=8,
                               strides=4,
                               activation='relu')(x)
    x = layers.SeparableConv2D(filters=32,
                               kernel_size=4,
                               strides=2,
                               activation='relu')(x)
    return x
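
A hedged usage sketch with the 84x84x4 stacked-frame input common in the A3C/Atari setting (`params` is accepted but never read in this excerpt):

from tensorflow.keras import layers, Input, Model

frames = Input(shape=(84, 84, 4))   # four stacked grayscale frames, illustrative
features = a3c_sepconv(frames, params={})
x = layers.Flatten()(features)
model = Model(frames, layers.Dense(256, activation='relu')(x))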
Example #24
def getModel_SeparableConv2D():
	model=models.Sequential()
	model.add(layers.SeparableConv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
	model.add(layers.MaxPooling2D(2, 2))
	model.add(layers.SeparableConv2D(64, (3, 3), activation='relu'))
	model.add(layers.MaxPooling2D(2, 2))
	model.add(layers.SeparableConv2D(128, (3, 3), activation='relu'))
	model.add(layers.MaxPooling2D(2, 2))
	model.add(layers.SeparableConv2D(128, (3, 3), activation='relu'))
	model.add(layers.MaxPooling2D(2, 2))
	model.add(layers.Flatten())
	model.add(layers.Dense(512, activation='relu'))
	model.add(layers.Dense(1, activation='sigmoid'))
	# model.summary()
	# compile the model
	# model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])
	model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
	return model
Example #25
def get_model():
    model = models.Sequential()
    model.add(
        layers.SeparableConv2D(32, (3, 3),
                               activation='relu',
                               input_shape=(32, 32, 3)))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.SeparableConv2D(32, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.SeparableConv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-3),
                  metrics=['acc'])
    return model
Example #26
def classification_coco(fpn_features, w_head, d_head, num_anchors,
                        num_classes):
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'depthwise_initializer': initializers.VarianceScaling(),
        'pointwise_initializer': initializers.VarianceScaling(),
    }
    cls_convs = [
        layers.SeparableConv2D(filters=w_head,
                               bias_initializer='zeros',
                               name=f'class_net/class-{i}',
                               **options) for i in range(d_head)
    ]
    cls_head_conv = layers.SeparableConv2D(
        filters=num_classes * num_anchors,
        bias_initializer=PriorProbability(probability=3e-4),
        name='class_net/class-predict',
        **options)
    cls_bns = [[
        layers.BatchNormalization(momentum=MOMENTUM,
                                  epsilon=EPSILON,
                                  name=f'class_net/class-{i}-bn-{j}')
        for j in range(3, 8)
    ] for i in range(d_head)]
    cls_relu = layers.Lambda(lambda x: tf.nn.swish(x))
    classification = []
    cls_reshape = layers.Reshape((-1, num_classes))
    cls_activation = layers.Activation('sigmoid')
    for i, feature in enumerate(fpn_features):
        for j in range(d_head):
            feature = cls_convs[j](feature)
            feature = cls_bns[j][i](feature)
            feature = cls_relu(feature)
        feature = cls_head_conv(feature)
        feature = cls_reshape(feature)
        feature = cls_activation(feature)
        classification.append(feature)
    classification = layers.Concatenate(axis=1,
                                        name='classification')(classification)
    return classification
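
A hedged sketch of driving the head with five dummy FPN levels (P3-P7, sizes and class count illustrative); `PriorProbability`, `MOMENTUM`, and `EPSILON` come from the surrounding module:

from tensorflow.keras import layers, Input, Model

fpn_features = [Input(shape=(s, s, 64)) for s in (64, 32, 16, 8, 4)]  # P3..P7
cls = classification_coco(fpn_features, w_head=64, d_head=3,
                          num_anchors=9, num_classes=90)
model = Model(fpn_features, cls)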
Example #27
def _wasnet_block(x, filter, strides=(2, 2), nl='RE'):

    residual = layers.Conv2D(filter,
                             kernel_size=(1, 1),
                             strides=strides,
                             padding='same')(x)
    residual = layers.BatchNormalization(axis=bn_axis)(residual)

    cbam = attach_attention_module(residual, attention_module='cbam_block')

    x = layers.SeparableConv2D(filter, (3, 3), padding='same')(x)
    x = layers.BatchNormalization(axis=bn_axis)(x)
    x = return_activation(x, nl)
    x = layers.SeparableConv2D(filter, (3, 3), padding='same')(x)
    x = layers.BatchNormalization(axis=bn_axis)(x)

    x = layers.MaxPooling2D((3, 3), strides=strides, padding='same')(x)
    x = layers.add([x, residual, cbam])

    return x
Example #28
def SeparableConvBlock(num_channels, kernel_size, strides, name):
    f1 = layers.SeparableConv2D(num_channels,
                                kernel_size=kernel_size,
                                strides=strides,
                                padding='same',
                                use_bias=True,
                                name=f'{name}/conv')
    f2 = layers.BatchNormalization(momentum=MOMENTUM,
                                   epsilon=EPSILON,
                                   name=f'{name}/bn')
    # Compose conv then batch norm into one callable: x -> f2(f1(x)).
    return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)),
                  (f1, f2))
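
The helper returns a single composed callable (conv, then batch norm); a hedged usage sketch with illustrative values for the module-level `MOMENTUM`/`EPSILON`:

from functools import reduce
from tensorflow.keras import layers, Input

MOMENTUM, EPSILON = 0.997, 1e-4     # illustrative
x = Input(shape=(64, 64, 64))
x = SeparableConvBlock(num_channels=64, kernel_size=3, strides=1, name='fpn_cell')(x)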
Example #29
def properties_sand(fpn_features, w_head, d_head, num_anchors, num_properties):
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'depthwise_initializer': initializers.VarianceScaling(),
        'pointwise_initializer': initializers.VarianceScaling(),
    }
    pro_convs = [
        layers.SeparableConv2D(filters=w_head,
                               bias_initializer='zeros',
                               name=f'property_net/property-{i}',
                               **options) for i in range(d_head)
    ]
    pro_head_conv = layers.SeparableConv2D(
        filters=num_properties * num_anchors,
        bias_initializer='zeros',
        name='property_net/property-predict',
        **options)
    pro_bns = [[
        layers.BatchNormalization(momentum=MOMENTUM,
                                  epsilon=EPSILON,
                                  name=f'property_net/property-{i}-bn-{j}')
        for j in range(3, 8)
    ] for i in range(d_head)]
    pro_relu = layers.Lambda(lambda x: tf.nn.swish(x))
    pro = []
    pro_reshape = layers.Reshape((-1, num_properties))
    pro_activation = layers.Activation('softmax')
    for i, feature in enumerate(fpn_features):
        for j in range(d_head):
            feature = pro_convs[j](feature)
            feature = pro_bns[j][i](feature)
            feature = pro_relu(feature)
        feature = pro_head_conv(feature)
        feature = pro_reshape(feature)
        feature = pro_activation(feature)
        pro.append(feature)
    pro = layers.Concatenate(axis=1, name='pro_sand')(pro)
    return pro
Example #30
def build_separable_model(calc_margin):
    print("Building model...")
    number_of_classes = 3
    input_shape = (64, 64, 1)

    x = layers.Input(shape=input_shape)
    # Trying separable Convolutions, but the accuracy dropped 5%
    conv1 = layers.SeparableConv2D(64, (9, 9), activation='relu',
                                   name="SepLayer")(x)
    '''
    The second layer is a Primary Capsule layer resulting from
    256×9×9 convolutions with strides of 2.
    This layer consists of 32 capsules with dimension of 8 each of
    which has feature maps of size 24×24 (i.e., each Component
    Capsule contains 24 × 24 localized individual Capsules).
    '''
    primaryCaps = PrimaryCap(inputs=conv1, dim_capsule=8,
                             n_channels=32, kernel_size=9, strides=2,
                             padding='valid')
    '''
    Final capsule layer includes 3 capsules, referred to as "Class
    Capsules", one for each type of candidate brain tumor. The
    dimension of these capsules is 16.
    '''
    capLayer2 = CapsuleLayer(num_capsule=3, dim_capsule=16, routings=3,
                             name="ThirdLayer")(primaryCaps)

    out_caps = Length(name='capsnet')(capLayer2)

    # Decoder network.
    y = layers.Input(shape=(number_of_classes,))
    # The true label is used to mask the output of capsule layer. For training
    masked_by_y = Mask()([capLayer2, y])

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu',
                             input_dim=16 * number_of_classes))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])

    if calc_margin:
        loss_func = [margin_loss, 'mse']
    else:
        # Note: the model has two outputs; most Keras versions expect one
        # loss per output when a list is given.
        loss_func = ['mse']
    train_model.compile(optimizer="rmsprop", loss=loss_func,
                        metrics=['accuracy'])
    train_model.summary()

    return train_model