def my_model():
    # prep layers
    inp = layers.Input(shape=(32, 32, 3))
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layer4
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(10)(x)
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)

    return model
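
The `residual` helper called above is not part of this snippet. A minimal sketch consistent with the surrounding blocks (two 3x3 conv-BN-LeakyReLU stages at constant width, as in the cifar10-fast architecture) could be:

import tensorflow as tf
from tensorflow.keras import layers

def residual(x, num_channels):
    # Hypothetical stand-in for the undefined `residual` helper: two
    # 3x3 conv -> BN -> LeakyReLU stages, matching the blocks above.
    for _ in range(2):
        x = layers.Conv2D(num_channels, 3, padding='same')(x)
        x = layers.BatchNormalization(momentum=0.8)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)
    return x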
Example #2
def SplitAttentionConv2D(x,
                         filters,
                         kernel_size,
                         stride=1,
                         padding=(0, 0),
                         groups=1,
                         use_bias=True,
                         radix=2,
                         name=None):
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    reduction_factor = 4
    inter_filters = max(filters * radix // reduction_factor, 32)
    x = layers.ZeroPadding2D((padding, padding), name=name + '_splat_pad')(x)
    x = layers.Conv2D(filters * radix,
                      kernel_size=kernel_size,
                      strides=stride,
                      groups=groups * radix,
                      use_bias=use_bias,
                      name=name + '_0_splat_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_0_splat_bn')(x)
    x = layers.Activation('relu', name=name + '_0_splat_relu')(x)

    splits = layers.Lambda(lambda x: tf.split(x, radix, bn_axis),
                           name=name + '_0_splat_split')(x)
    x = layers.Add(name=name + '_0_splat_add')(splits)
    x = layers.GlobalAveragePooling2D(name=name + '_0_splat_pool')(x)
    shape = (1, 1, filters) if bn_axis == 3 else (filters, 1, 1)
    x = layers.Reshape(shape, name=name + '_0_splat_reshape')(x)

    x = layers.Conv2D(inter_filters,
                      kernel_size=1,
                      groups=groups,
                      name=name + '_1_splat_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_1_splat_bn')(x)
    x = layers.Activation('relu', name=name + '_1_splat_relu')(x)

    # Attention
    x = layers.Conv2D(filters * radix,
                      kernel_size=1,
                      groups=groups,
                      name=name + '_2_splat_conv')(x)
    x = RSoftmax(x, filters * radix, radix, groups, name=name)
    x = layers.Lambda(lambda x: tf.split(x, radix, bn_axis),
                      name=name + '_1_splat_split')(x)
    x = layers.Lambda(
        lambda x: [tf.stack(x[0], axis=bn_axis),
                   tf.stack(x[1], axis=bn_axis)],
        name=name + '_splat_stack')([splits, x])
    x = layers.Multiply(name=name + '_splat_mult')(x)
    x = layers.Lambda(lambda x: tf.unstack(x, axis=bn_axis),
                      name=name + '_splat_unstack')(x)
    x = layers.Add(name=name + '_splat_add')(x)
    return x
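
The `RSoftmax` helper this snippet calls is not shown. A hedged sketch following the ResNeSt paper (softmax across the radix splits within each cardinal group, falling back to a sigmoid when radix == 1) might look like:

import tensorflow as tf
from tensorflow.keras import layers

def RSoftmax(x, filters, radix, groups, name=None):
    # Sketch of the assumed RSoftmax helper: normalize attention weights
    # across the radix splits of each cardinal group.
    if radix > 1:
        x = layers.Reshape((groups, radix, filters // groups // radix),
                           name=name + '_att_reshape')(x)
        x = layers.Softmax(axis=2, name=name + '_att_softmax')(x)
        x = layers.Reshape((1, 1, filters), name=name + '_att_out')(x)
    else:
        x = layers.Activation('sigmoid', name=name + '_att_sigmoid')(x)
    return x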
Example #3
def RetinaNet(input_shape, num_classes, num_anchor=9):
    """Creates the RetinaNet.
    RetinaNet is composed of an FPN, a classification sub-network and a localization regression sub-network.
    
    Args:
        input_shape (tuple): shape of input image.
        num_classes (int): number of classes.
        num_anchor (int, optional): number of anchor boxes. Defaults to 9.
    
    Returns:
        'Model' object: RetinaNet.
    """
    inputs = tf.keras.Input(shape=input_shape)
    # FPN
    resnet50 = tf.keras.applications.ResNet50(weights="imagenet", include_top=False, input_tensor=inputs, pooling=None)
    assert resnet50.layers[80].name == "conv3_block4_out"
    C3 = resnet50.layers[80].output
    assert resnet50.layers[142].name == "conv4_block6_out"
    C4 = resnet50.layers[142].output
    assert resnet50.layers[-1].name == "conv5_block3_out"
    C5 = resnet50.layers[-1].output
    P5 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C5)
    P5_upsampling = layers.UpSampling2D()(P5)
    P4 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C4)
    P4 = layers.Add()([P5_upsampling, P4])
    P4_upsampling = layers.UpSampling2D()(P4)
    P3 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C3)
    P3 = layers.Add()([P4_upsampling, P3])
    P6 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P6")(C5)
    P7 = layers.Activation('relu')(P6)
    P7 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P7")(P7)
    P5 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P5")(P5)
    P4 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P4")(P4)
    P3 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P3")(P3)
    # classification subnet
    cls_subnet = classification_sub_net(num_classes=num_classes, num_anchor=num_anchor)
    P3_cls = cls_subnet(P3)
    P4_cls = cls_subnet(P4)
    P5_cls = cls_subnet(P5)
    P6_cls = cls_subnet(P6)
    P7_cls = cls_subnet(P7)
    cls_output = layers.Concatenate(axis=-2)([P3_cls, P4_cls, P5_cls, P6_cls, P7_cls])
    # localization subnet
    loc_subnet = regression_sub_net(num_anchor=num_anchor)
    P3_loc = loc_subnet(P3)
    P4_loc = loc_subnet(P4)
    P5_loc = loc_subnet(P5)
    P6_loc = loc_subnet(P6)
    P7_loc = loc_subnet(P7)
    loc_output = layers.Concatenate(axis=-2)([P3_loc, P4_loc, P5_loc, P6_loc, P7_loc])
    return tf.keras.Model(inputs=inputs, outputs=[cls_output, loc_output])
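
`classification_sub_net` and `regression_sub_net` are assumed by this snippet. Minimal stand-ins consistent with the RetinaNet paper (four shared 3x3 convs, then per-anchor outputs reshaped to (batch, boxes, ·)) could be:

import tensorflow as tf
from tensorflow.keras import layers

def classification_sub_net(num_classes, num_anchor=9):
    # Hypothetical stand-in: shared 3x3 convs, then per-anchor class
    # probabilities flattened to (batch, num_boxes, num_classes).
    return tf.keras.Sequential(
        [layers.Conv2D(256, 3, padding='same', activation='relu')
         for _ in range(4)]
        + [layers.Conv2D(num_anchor * num_classes, 3, padding='same',
                         activation='sigmoid'),
           layers.Reshape((-1, num_classes))])

def regression_sub_net(num_anchor=9):
    # Hypothetical stand-in: same trunk, four box offsets per anchor.
    return tf.keras.Sequential(
        [layers.Conv2D(256, 3, padding='same', activation='relu')
         for _ in range(4)]
        + [layers.Conv2D(num_anchor * 4, 3, padding='same'),
           layers.Reshape((-1, 4))])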
Example #4
def ResNet9(input_size: Tuple[int, int, int] = (32, 32, 3),
            classes: int = 10) -> tf.keras.Model:
    """A small 9-layer ResNet Tensorflow model for cifar10 image classification.
    The model architecture is from https://github.com/davidcpage/cifar10-fast

    Args:
        input_size: The size of the input tensor (height, width, channels).
        classes: The number of outputs the model should generate.

    Raises:
        ValueError: Length of `input_size` is not 3.
        ValueError: `input_size`[0] or `input_size`[1] is not a multiple of 16.

    Returns:
        A TensorFlow ResNet9 model.
    """
    _check_input_size(input_size)

    # prep layers
    inp = layers.Input(shape=input_size)
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layer4
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(classes)(x)
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)

    return model
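
Typical instantiation, assuming the `residual` and `_check_input_size` helpers are in scope (the loss choice below is illustrative):

model = ResNet9(input_size=(32, 32, 3), classes=10)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()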
Example #5
def block2(x,
           filters,
           kernel_size=3,
           stride=1,
           conv_shortcut=False,
           name=None):
    """A residual block.

    Arguments:
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        conv_shortcut: default False, use convolution shortcut if True,
          otherwise identity shortcut.
        name: string, block label.

    Returns:
      Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    preact = layers.BatchNormalization(axis=bn_axis,
                                       epsilon=1.001e-5,
                                       name=name + '_preact_bn')(x)
    preact = layers.Activation('relu', name=name + '_preact_relu')(preact)

    if conv_shortcut:
        shortcut = layers.Conv2D(4 * filters,
                                 1,
                                 strides=stride,
                                 name=name + '_0_conv')(preact)
    else:
        shortcut = layers.MaxPooling2D(1,
                                       strides=stride)(x) if stride > 1 else x

    x = layers.Conv2D(filters,
                      1,
                      strides=1,
                      use_bias=False,
                      name=name + '_1_conv')(preact)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = layers.Conv2D(filters,
                      kernel_size,
                      strides=stride,
                      use_bias=False,
                      name=name + '_2_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
    x = layers.Add(name=name + '_out')([shortcut, x])
    return x
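
A plausible companion stage builder, mirroring `stack2` in keras.applications.resnet (one conv-shortcut block, identity blocks, then a strided block closing the pre-activation stage):

def stack2(x, filters, blocks, stride1=2, name=None):
    # Stacks `blocks` residual units into one ResNetV2 stage.
    x = block2(x, filters, conv_shortcut=True, name=name + '_block1')
    for i in range(2, blocks):
        x = block2(x, filters, name=name + '_block' + str(i))
    x = block2(x, filters, stride=stride1, name=name + '_block' + str(blocks))
    return x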
Example #6
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

    in_channels = backend.int_shape(inputs)[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = layers.Conv2D(round(expansion * in_channels),
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          kernel_regularizer=regularizers.l2(l2_reg),
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand_BN')(x)
        x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
                                 name=prefix + 'pad')(x)
    x = layers.DepthwiseConv2D(kernel_size=3,
                               strides=stride,
                               activation=None,
                               use_bias=False,
                               depthwise_regularizer=regularizers.l2(l2_reg),
                               padding='same' if stride == 1 else 'valid',
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise_BN')(x)

    x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = layers.Conv2D(pointwise_filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      activation=None,
                      kernel_regularizer=regularizers.l2(l2_reg),
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + 'add')([inputs, x])
    return x
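
`_make_divisible`, `correct_pad`, and `l2_reg` are assumed by this snippet. For reference, `_make_divisible` is conventionally the standard MobileNet rounding helper, sketched here:

def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never dropping more
    # than 10% below the original value (standard MobileNet helper).
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v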
Example #7
    def __init__(self, F, scale_f, *args, **kwargs):

        super(_ResBlock, self).__init__(*args, **kwargs)
        self.conv1 = layers.Conv2D(F, (3, 3),
                                   padding="same",
                                   activation='relu')
        self.conv2 = layers.Conv2D(F, (3, 3), padding="same")
        self.scale = layers.Lambda(lambda x: scale_f * x, name="scale")
        self.add = layers.Add(name="add")
Example #8
    def stem(x, filters, kernel_size=kernel_size, padding="same", strides=1):
        conv = layers.Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
        conv = conv_block(conv, filters, kernel_size=kernel_size, padding=padding, strides=strides, dilation_rate=dilation_rate)

        shortcut = layers.Conv2D(filters, kernel_size=(1, 1), padding=padding, strides=strides, dilation_rate=dilation_rate)(x)
        shortcut = bn_act(shortcut, act=False)

        output = layers.Add()([conv, shortcut])
        return output
Example #9
    def residual_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):  # spatial dimensions are divided by `strides`
        res = conv_block(x, filters, kernel_size=kernel_size, padding=padding, strides=strides)
        res = conv_block(res, filters, kernel_size=kernel_size, padding=padding, strides=1)

        shortcut = layers.Conv2D(filters, kernel_size=(1, 1), padding=padding, strides=strides)(x)
        shortcut = bn_act(shortcut, act=False)

        output = layers.Add()([shortcut, res])
        return output
Example #10
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = kl.ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
                    DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = kl.Add()([x, y])
    return x
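
The `compose` helper comes from the keras-yolo3 utilities; a minimal sketch of left-to-right function composition:

from functools import reduce

def compose(*funcs):
    # Left-to-right composition: compose(f, g)(x) == g(f(x)).
    if not funcs:
        raise ValueError('compose of empty sequence not supported.')
    return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)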
Example #11
def identity_block(input_tensor,
                   kernel_size,
                   filters,
                   stage,
                   block,
                   use_bias=True,
                   norm_use="bn",
                   train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    norm_name_base = str(stage) + block + "_branch"

    x = KL.Conv2D(nb_filter1, (1, 1),
                  name=conv_name_base + '2a',
                  kernel_initializer='he_normal',
                  use_bias=use_bias)(input_tensor)
    x = normalize_layer(x,
                        name=norm_name_base + "2a",
                        norm_use=norm_use,
                        train_bn=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size),
                  padding='same',
                  kernel_initializer='he_normal',
                  name=conv_name_base + '2b',
                  use_bias=use_bias)(x)
    x = normalize_layer(x,
                        name=norm_name_base + "2b",
                        norm_use=norm_use,
                        train_bn=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1),
                  name=conv_name_base + '2c',
                  kernel_initializer='he_normal',
                  use_bias=use_bias)(x)
    x = normalize_layer(x,
                        name=norm_name_base + "2c",
                        norm_use=norm_use,
                        train_bn=train_bn)

    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #12
def resblock(input_tensor, num_channels):
    """
    returns the basic unit of the ResNet network - the residual block
    """
    conv1 = layers.Conv2D(num_channels, kernel_size=(3, 3),
                          padding='same')(input_tensor)
    act = layers.Activation('relu')(conv1)
    conv2 = layers.Conv2D(num_channels, kernel_size=(3, 3),
                          padding='same')(act)
    add = layers.Add()([conv2, input_tensor])
    return layers.Activation('relu')(add)
Example #13
    def _context_module(self,
                        num_filters,
                        inputs,
                        dropout_rate=0.3,
                        strides=(1, 1)):
        conv_0 = self._conv_block(num_filters, inputs, strides=strides)
        conv_1 = self._conv_block(num_filters, conv_0)
        dropout = layers.SpatialDropout2D(rate=dropout_rate)(conv_1)
        conv_2 = self._conv_block(num_filters, dropout)
        sum_output = layers.Add()([conv_0, conv_2])
        return sum_output
Example #14
def ResidualBlock(input, num_filters):
    # x = res_seq(num_filters)(input)
    x = layers.Conv2D(num_filters, (1, 1))(input)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(num_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(num_filters, (1, 1))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Add()([x, input])
    return layers.Activation('relu')(x)
Example #15
def identity_block(x, **kwargs):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    kernel_size = get_varargin(kwargs, 'kernel_size', 3)
    strides = get_varargin(kwargs, 'strides', (1,1))
    stage = get_varargin(kwargs, 'stage', 2)
    conv_block = get_varargin(kwargs, 'conv_block', False)
    block_id = get_varargin(kwargs, 'block_id', 1)
    
    if stage == 2:
        filters = config.MODEL.RESNET.FILTERS_C2
    elif stage == 3:
        filters = config.MODEL.RESNET.FILTERS_C3
    elif stage == 4:
        filters = config.MODEL.RESNET.FILTERS_C4
    else:
        filters = config.MODEL.RESNET.FILTERS_C5
        
    filters1, filters2, filters3 = filters
    bn_axis = 3 # Channel last, tensorflow backend
    prefix_blockname = 'C{}_branch2_blk{}_'.format(stage, block_id)    
    fx = KL.Conv2D(filters1, (1, 1), strides = strides, name = '{}Conv1'.format(prefix_blockname))(x)
    fx = KL.BatchNormalization(axis=bn_axis, name = '{}Bnorm1'.format(prefix_blockname))(fx) #bn: batchnorm
    fx = KL.Activation('relu', name = '{}Act1'.format(prefix_blockname))(fx)

    fx = KL.Conv2D(filters2, kernel_size,padding='same', name = '{}Conv2'.format(prefix_blockname))(fx)
    fx = KL.BatchNormalization(axis = bn_axis, name = '{}Bnorm2'.format(prefix_blockname))(fx)
    fx = KL.Activation('relu', name = '{}Act2'.format(prefix_blockname))(fx)

    fx = KL.Conv2D(filters3, (1, 1), name = '{}Conv3'.format(prefix_blockname))(fx)
    fx = KL.BatchNormalization(axis=bn_axis, name = '{}Bnorm3'.format(prefix_blockname))(fx)
    
    prefix_blockname = 'C{}_branch1_blk{}_'.format(stage, block_id)    
    #Shortcut branch     
    if conv_block is True:
        shortcut = KL.Conv2D(filters3, (1, 1), strides = strides, name = '{}Conv1'.format(prefix_blockname))(x)
        shortcut = KL.BatchNormalization(axis=bn_axis, name = '{}Bnorm1'.format(prefix_blockname))(shortcut)
    else:
        shortcut = x
        
    # Merge
    fx = KL.Add()([fx, shortcut])
    fx = KL.Activation('relu')(fx)
    return fx
Example #16
def _resblock(x0, num_filter=256, kernel_size=3):
    x = ReflectionPadding2D()(x0)
    x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size, kernel_initializer=RandomNormal(mean=0,
                                                                                                   stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    x = ReflectionPadding2D()(x)
    x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size, kernel_initializer=RandomNormal(mean=0,
                                                                                                   stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.Add()([x, x0])
    return x
Example #17
def build_nn_model(height, width, num_channels, num_res_blocks):
    """
    returns the complete Neural-Network model
    """
    in1 = layers.Input(shape=(height, width, 1))
    conv1 = layers.Conv2D(num_channels, kernel_size=(3, 3),
                          padding='same')(in1)
    act = layers.Activation('relu')(conv1)
    for i in range(num_res_blocks):
        act = resblock(act, num_channels)
    conv2 = layers.Conv2D(1, kernel_size=(3, 3), padding='same')(act)
    add = layers.Add()([conv2, in1])
    return models.Model(inputs=in1, outputs=add)
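
Example usage, assuming `resblock` from Example #12 is in scope (the mean-squared-error loss is an illustrative choice for residual image restoration):

model = build_nn_model(height=64, width=64, num_channels=64, num_res_blocks=8)
model.compile(optimizer='adam', loss='mse')
model.summary()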
Example #18
    def layer(input_tensor):

        x = input_tensor
        residual = input_tensor

        # bottleneck
        x = layers.Conv2D(filters // 2, (1, 1),
                          kernel_initializer='he_uniform',
                          use_bias=False)(x)
        x = layers.BatchNormalization(**bn_params)(x)
        x = layers.Activation('relu')(x)

        x = layers.ZeroPadding2D(1)(x)
        x = GroupConv2D(filters, (3, 3),
                        strides=strides,
                        groups=groups,
                        kernel_initializer='he_uniform',
                        use_bias=False,
                        **kwargs)(x)
        x = layers.BatchNormalization(**bn_params)(x)
        x = layers.Activation('relu')(x)

        x = layers.Conv2D(filters, (1, 1),
                          kernel_initializer='he_uniform',
                          use_bias=False)(x)
        x = layers.BatchNormalization(**bn_params)(x)

        #  if number of filters or spatial dimensions changed
        #  make same manipulations with residual connection
        x_channels = get_num_channels(x)
        r_channels = get_num_channels(residual)

        if strides != 1 or x_channels != r_channels:
            if padding:
                residual = layers.ZeroPadding2D(1)(residual)
            residual = layers.Conv2D(x_channels,
                                     downsample_kernel_size,
                                     strides=strides,
                                     kernel_initializer='he_uniform',
                                     use_bias=False)(residual)
            residual = layers.BatchNormalization(**bn_params)(residual)

        # apply attention module
        x = ChannelSE(reduction=reduction, **kwargs)(x)

        # add residual connection
        x = layers.Add()([x, residual])

        x = layers.Activation('relu')(x)

        return x
Example #19
def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio,
                        activation, block_id):
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    shortcut = x
    prefix = 'expanded_conv/'
    infilters = backend.int_shape(x)[channel_axis]
    if block_id:
        # Expand
        prefix = 'expanded_conv_{}/'.format(block_id)
        x = layers.Conv2D(_depth(infilters * expansion),
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand/BatchNorm')(x)
        x = activation(x)

    if stride == 2:
        x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(
            x, kernel_size),
                                 name=prefix + 'depthwise/pad')(x)
    x = layers.DepthwiseConv2D(kernel_size,
                               strides=stride,
                               padding='same' if stride == 1 else 'valid',
                               use_bias=False,
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise/BatchNorm')(x)
    x = activation(x)

    if se_ratio:
        x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)

    x = layers.Conv2D(filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'project/BatchNorm')(x)

    if stride == 1 and infilters == filters:
        x = layers.Add(name=prefix + 'Add')([shortcut, x])
    return x
Example #20
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
    """A residual block.

    Arguments:
      x: input tensor.
      filters: integer, filters of the bottleneck layer.
      kernel_size: default 3, kernel size of the bottleneck layer.
      stride: default 1, stride of the first layer.
      conv_shortcut: default True, use convolution shortcut if True,
          otherwise identity shortcut.
      name: string, block label.

    Returns:
      Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    if conv_shortcut:
        shortcut = layers.Conv2D(4 * filters,
                                 1,
                                 strides=stride,
                                 name=name + '_0_conv')(x)
        shortcut = layers.BatchNormalization(axis=bn_axis,
                                             epsilon=1.001e-5,
                                             name=name + '_0_bn')(shortcut)
    else:
        shortcut = x

    x = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    x = layers.Conv2D(filters,
                      kernel_size,
                      padding='same',
                      name=name + '_2_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_3_bn')(x)

    x = layers.Add(name=name + '_add')([shortcut, x])
    x = layers.Activation('relu', name=name + '_out')(x)
    return x
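
A companion stage builder, mirroring `stack1` in keras.applications.resnet (one strided conv-shortcut block followed by identity blocks):

def stack1(x, filters, blocks, stride1=2, name=None):
    # Stacks `blocks` residual units into one ResNetV1 stage.
    x = block1(x, filters, stride=stride1, name=name + '_block1')
    for i in range(2, blocks + 1):
        x = block1(x, filters, conv_shortcut=False,
                   name=name + '_block' + str(i))
    return x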
Example #21
    def block(inputs):

        if block_args.expand_ratio != 1:
            x = KL.Conv2D(filters,
                          kernel_size=[1, 1],
                          strides=[1, 1],
                          kernel_initializer=conv_kernel_initializer,
                          padding='same',
                          use_bias=False)(inputs)
            x = KL.BatchNormalization(axis=channel_axis,
                                      momentum=batch_norm_momentum,
                                      epsilon=batch_norm_epsilon)(x)
            x = Swish()(x)
        else:
            x = inputs

        x = KL.DepthwiseConv2D([kernel_size, kernel_size],
                               strides=block_args.strides,
                               depthwise_initializer=conv_kernel_initializer,
                               padding='same',
                               use_bias=False)(x)
        x = KL.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)
        x = Swish()(x)

        if has_se:
            x = SEBlock(block_args, global_params)(x)

        # output phase

        x = KL.Conv2D(block_args.output_filters,
                      kernel_size=[1, 1],
                      strides=[1, 1],
                      kernel_initializer=conv_kernel_initializer,
                      padding='same',
                      use_bias=False)(x)
        x = KL.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)

        if block_args.id_skip:
            if (all(s == 1 for s in block_args.strides)
                    and block_args.input_filters == block_args.output_filters):
                # only apply drop_connect if skip presents.
                if drop_connect_rate:
                    x = DropConnect(drop_connect_rate)(x)
                x = KL.Add()([x, inputs])
        return x
Example #22
    def build(self):
        inputs = layers.Input(self.input_size)

        output0 = self._context_module(16, inputs, strides=(1, 1))
        output1 = self._context_module(32, output0, strides=(2, 2))
        output2 = self._context_module(64, output1, strides=(2, 2))
        output3 = self._context_module(128, output2, strides=(2, 2))
        output4 = self._context_module(256, output3, strides=(2, 2))

        decoder0 = self._decoder_block(128, [output3, output4])
        decoder1 = self._decoder_block(64, [output2, decoder0])
        decoder2 = self._decoder_block(32, [output1, decoder1])
        decoder3 = self._decoder_block_last(16, [output0, decoder2])
        output0 = layers.Conv2D(self.num_class, (1, 1))(decoder3)
        output1 = layers.Conv2D(self.num_class, (1, 1))(decoder2)
        output2_up = layers.UpSampling2D(size=(2, 2))(layers.Conv2D(
            self.num_class, (1, 1))(decoder1))

        output_sum = layers.Add()([output2_up, output1])
        output_sum = layers.UpSampling2D(size=(2, 2))(output_sum)
        output_sum = layers.Add()([output_sum, output0])
        output = layers.Softmax()(output_sum)

        return models.Model(inputs=[inputs], outputs=[output])
Example #23
def net():
    input_x = layers.Input(shape=(3, ), name="input_x")
    input_z = layers.Input(shape=(2, ), name="input_z")

    dense_1_x = layers.Dense(25, activation="relu")(input_x)
    dense_1_z = layers.Dense(25, activation="relu")(input_z)
    add = layers.Add()([dense_1_x, dense_1_z])
    dense_2 = layers.Dense(10, activation="relu")(add)
    dense_3 = layers.Dense(10, activation="relu")(dense_2)

    output_cls = layers.Dense(3, activation="softmax",
                              name="classification")(dense_3)
    output_reg = layers.Dense(2, activation="linear",
                              name="regression")(dense_3)

    return Model(inputs=[input_x, input_z], outputs=[output_reg, output_cls])
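
Compiling this two-headed model takes one loss per named output; a hedged example (the loss choices are illustrative):

model = net()
model.compile(optimizer='adam',
              loss={'regression': 'mse',
                    'classification': 'sparse_categorical_crossentropy'})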
Example #24
def block2(x,
           filters,
           kernel_size=3,
           stride=1,
           conv_shortcut=False,
           name=None):
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    preact = layers.BatchNormalization(axis=bn_axis,
                                       epsilon=1.001e-5,
                                       name=name + '_preact_bn')(x)
    preact = layers.Activation('relu', name=name + '_preact_relu')(preact)

    if conv_shortcut:
        shortcut = layers.Conv2D(4 * filters,
                                 1,
                                 strides=stride,
                                 name=name + '_0_conv')(preact)
    else:
        shortcut = layers.MaxPooling2D(1,
                                       strides=stride)(x) if stride > 1 else x

    x = layers.Conv2D(filters,
                      1,
                      strides=1,
                      use_bias=False,
                      name=name + '_1_conv')(preact)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = layers.Conv2D(filters,
                      kernel_size,
                      strides=stride,
                      use_bias=False,
                      name=name + '_2_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
    x = layers.Add(name=name + '_out')([shortcut, x])
    return x
Example #25
def block1(x,
    filters,
    bottleneck=False,
    stride=1,
    expansion=1,
    activation='relu',
    bn_sync=BN_SYNC,
    name=None):
    """A basic residual block.

    Arguments:
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        stride: default 1, stride of the first layer.
        conv_shortcut: default True, use convolution shortcut if True,
                otherwise identity shortcut.
        name: string, block label.

    Returns:
        Output tensor for the residual block.
    """
    conv_shortcut = (stride != 1) or (expansion*filters != x.shape[3])
    if conv_shortcut:
        shortcut = conv1x1(x, filters=expansion*filters, strides=stride,
            name=name+'_0_conv')
        shortcut = batchnorm(shortcut, bn_sync=bn_sync, name=name+'_0_bn')
    else:
        shortcut = x
    # First conv.
    if bottleneck:
        x = conv1x1(x, filters=filters, strides=1, name=name+'_1_conv')
        x = batchnorm(x, bn_sync=bn_sync, name=name+'_1_bn')
        x = nonlinearity(x, layer_activation=activation, name=name+'_1_'+activation)
    # Second conv.
    idx = 2 if bottleneck else 1
    x = conv3x3(x, filters=filters, strides=stride, name=name+'_%d_conv' %idx)
    x = batchnorm(x, bn_sync=bn_sync, name=name+'_%d_bn' %idx)
    x = nonlinearity(x, layer_activation=activation, name=name+'_%d_%s' %(idx, activation))
    # Last conv.
    last_conv = conv1x1 if bottleneck else conv3x3
    x = last_conv(x, filters=expansion*filters, strides=1, name=name+'_%d_conv' %(idx+1))
    x = batchnorm(x, bn_sync=bn_sync, name=name+'_%d_bn' %(idx+1))
    # Skip connection.
    x = layers.Add(name=name+'_add')([shortcut, x])
    x = nonlinearity(x, layer_activation=activation, name=name+'_out_'+activation)
    return x
Example #26
def EDSR_func(inp, scale, F, nb_res, res_scale_f):
    x = MeanShift(-1)(inp)
    x = layers.Conv2D(F, (3, 3), padding="same")(x)
    conv1 = x
    for i in range(nb_res):
        x = _ResBlock(F, res_scale_f, name="res%d" % i)(x)
    x = layers.Conv2D(F, (3, 3), padding="same")(x)
    x = layers.Add()([conv1, x])
    if scale == 2 or scale == 3:
        x = SubpixelLayer(scale=scale, out_channel=F, kernel_size=3)(x)
    elif scale == 4:
        x = SubpixelLayer(scale=2, out_channel=F, kernel_size=3)(x)
        x = SubpixelLayer(scale=2, out_channel=F, kernel_size=3)(x)
    else:
        raise ValueError("Wrong value of scale factor.")
    out = layers.Conv2D(3, (3, 3), padding="same")(x)
    out = MeanShift(1)(out)
    return out
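
`MeanShift`, `_ResBlock` (Example #7), and `SubpixelLayer` are assumed here. A hedged sketch of `SubpixelLayer` as a standard sub-pixel convolution (a conv expanding channels by scale**2, then tf.nn.depth_to_space):

import tensorflow as tf
from tensorflow.keras import layers

def SubpixelLayer(scale, out_channel, kernel_size):
    # Hypothetical stand-in: expand channels by scale**2, then
    # pixel-shuffle with tf.nn.depth_to_space.
    def apply(x):
        x = layers.Conv2D(out_channel * scale ** 2, kernel_size,
                          padding='same')(x)
        return layers.Lambda(lambda t: tf.nn.depth_to_space(t, scale))(x)
    return apply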
Example #27
def shortcut(x, residual):
    '''Create the shortcut connection.
    '''
    x_shape = backend.int_shape(x)
    residual_shape = backend.int_shape(residual)

    if x_shape == residual_shape:
        # If x and residual have the same shape, do nothing.
        shortcut = x
    else:
        # Otherwise, apply a strided 1x1 convolution so the shapes match.
        # With channels_last data, shape[1] is height and shape[2] is width.
        stride_h = int(round(x_shape[1] / residual_shape[1]))
        stride_w = int(round(x_shape[2] / residual_shape[2]))

        shortcut = layers.Conv2D(filters=residual_shape[3],
                                 kernel_size=(1, 1),
                                 strides=(stride_h, stride_w),
                                 kernel_initializer='he_normal',
                                 kernel_regularizer=regularizers.l2(1.e-4))(x)
    return layers.Add()([shortcut, residual])
Example #28
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(units=32, activation='relu')(states)
        net_states = layers.Dropout(0.8)(net_states)
        net_states = layers.Dense(units=64, activation='relu')(net_states)
        net_states = layers.Dropout(0.8)(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=32, activation='relu')(actions)
        net_actions = layers.Dense(units=64, activation='relu')(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam(lr=0.0001)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. to actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
Example #29
    def __init__(self, filter_num, channel, stride=1, reduction=16):
        super().__init__()
        self.conv1 = layers.Conv1D(filter_num,
                                   3,
                                   strides=stride,
                                   padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')

        self.conv2 = layers.Conv1D(filter_num, 3, strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()

        self.add = layers.Add()
        self.se = SELayer(channel, reduction)
        self.se2 = SELayer(channel, reduction)
        if stride != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv1D(filter_num, 1, strides=stride))
            self.downsample.add(layers.BatchNormalization())
        else:
            self.downsample = lambda x: x
Example #30
def block3(x,
    filters,
    bottleneck=False,
    stride=1,
    expansion=1,
    activation='leaky_relu',
    bn_sync=BN_SYNC,
    conv_shortcut=False,
    use_bias=False,
    name=None):
    """ A residual block.

    """
    preact = batchnorm(x, bn_mom=0.999, bn_eps=0.001, bn_sync=bn_sync, name=name+'_preact_bn')
    preact = nonlinearity(preact, layer_activation=activation,
        name=name+'_preact_'+activation)
    shortcut = preact if conv_shortcut else x
    if shortcut.shape[3] != expansion*filters:
        shortcut = conv1x1(shortcut, filters=expansion*filters, strides=stride,
            use_bias=use_bias, name=name+'_0_conv')
    # First conv.
    if bottleneck:
        x = conv1x1(preact, filters=filters, strides=1,
            use_bias=use_bias, name=name+'_1_conv')
        x = batchnorm(x, bn_mom=0.999, bn_eps=0.001, bn_sync=bn_sync, name=name+'_1_bn')
        x = nonlinearity(x, layer_activation=activation, name=name+'_1_'+activation)
    # Second conv.
    idx = 2 if bottleneck else 1
    x = conv3x3(x if bottleneck else preact, filters=filters, strides=stride,
        use_bias=use_bias, name=name+'_%d_conv' %idx)
    x = batchnorm(x, bn_mom=0.999, bn_eps=0.001, bn_sync=bn_sync, name=name+'_%d_bn' %idx)
    x = nonlinearity(x, layer_activation=activation, name=name+'_%d_%s' %(idx, activation))
    # Last conv.
    last_conv = conv1x1 if bottleneck else conv3x3
    x = last_conv(x, filters=expansion*filters, strides=1,
        use_bias=use_bias, name=name+'_%d_conv' %(idx+1))
    # Skip connection.
    x = layers.Add(name=name+'_add')([shortcut, x])
    return x