def build(dim):  # dim is the square dimension of the input image
    model = models.Sequential()

    # convolution layer with 4 3x3 kernels, applied to the input image
    model.add(
        layers.Conv2D(4, (3, 3),
                      activation='relu',
                      input_shape=(dim, dim, 1)))
    model.add(layers.AvgPool2D(
        (2, 2)))  # perform 2x2 average pooling with a stride of 2 (default)

    # second convolution layer with 4 3x3 kernels
    model.add(layers.Conv2D(4, (3, 3), activation='relu'))
    model.add(layers.AvgPool2D((2, 2)))

    # fully connected (densely connected) classifier network
    model.add(layers.Flatten())  # restructure data for the FC network
    model.add(layers.Dropout(0.5))  # apply dropout regularization
    model.add(layers.Dense(
        8, activation='relu'))  # dense layer with 8 fully connected neurons

    # output layer with softmax classifier for a multiclass, single-label problem
    model.add(layers.Dense(4, activation='softmax'))
    model.summary()  # print the network parameters
    num_layers = 4  # parameter used for plotting activation maps in Visualization
    return model, num_layers
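
A minimal usage sketch for the snippet above; the imports and the 64-pixel input size are assumptions, not part of the original.

from tensorflow.keras import layers, models  # assumed imports for `layers`/`models`

model, num_layers = build(64)  # illustrative: 64x64 single-channel input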
Example no. 2
def unet(t_x):
    input_img = layers.Input(t_x.shape[1:], name='RGB_Input')
    pp_in_layer = input_img

    if NET_SCALING is not None:
        pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)

    # pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
    pp_in_layer = layers.BatchNormalization()(pp_in_layer)

    c1 = layers.Conv2D(8, (3, 3), activation='relu',
                       padding='same')(pp_in_layer)
    c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
    p1 = layers.MaxPooling2D((2, 2))(c1)

    c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(p1)
    c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
    p2 = layers.MaxPooling2D((2, 2))(c2)

    c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p2)
    c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
    p3 = layers.MaxPooling2D((2, 2))(c3)

    c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(p3)
    c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4)
    p4 = layers.MaxPooling2D(pool_size=(2, 2))(c4)

    c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(p4)
    c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c5)

    u6 = upsample(64, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = layers.concatenate([u6, c4])
    c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(u6)
    c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c6)

    u7 = upsample(32, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = layers.concatenate([u7, c3])
    c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u7)
    c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c7)

    u8 = upsample(16, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = layers.concatenate([u8, c2])
    c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u8)
    c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c8)

    u9 = upsample(8, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = layers.concatenate([u9, c1])
    c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(u9)
    c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c9)

    d = layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
    #d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
    #d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)

    if NET_SCALING is not None:
        d = layers.UpSampling2D(NET_SCALING)(d)

    segmentation_model = models.Model(inputs=[input_img], outputs=[d])
    segmentation_model.summary()
    return segmentation_model
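
This snippet relies on module-level names that are not shown: `upsample`, `NET_SCALING`, and (in the commented-out line) `GAUSSIAN_NOISE`. Example no. 26 below defines equivalent helpers; a minimal sketch of the assumed globals:

NET_SCALING = (1, 1)   # assumed; None disables the scaling branches
GAUSSIAN_NOISE = 0.1   # assumed; only needed by the commented-out noise layer

def upsample(filters, kernel_size, strides, padding):
    # 'SIMPLE' mode from Example no. 26; 'DECONV' mode would use Conv2DTranspose
    return layers.UpSampling2D(strides)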
Example no. 3
def darknet(channels,
            odd_pointwise,
            avg_pool_size,
            cls_activ,
            in_channels=3,
            in_size=(224, 224),
            classes=1000):
    """
    DarkNet model from 'Darknet: Open Source Neural Networks in C,' https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    odd_pointwise : bool
        Whether pointwise convolution layer is used for each odd unit.
    avg_pool_size : int
        Window size of the final average pooling.
    cls_activ : bool
        Whether classification convolution layer uses an activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if K.image_data_format() == 'channels_first' else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = input
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            x = dark_convYxY(x=x,
                             in_channels=in_channels,
                             out_channels=out_channels,
                             pointwise=(len(channels_per_stage) > 1)
                             and not (((j + 1) % 2 == 1) ^ odd_pointwise),
                             name="features/stage{}/unit{}".format(
                                 i + 1, j + 1))
            in_channels = out_channels
        if i != len(channels) - 1:
            x = nn.MaxPool2D(pool_size=2,
                             strides=2,
                             name="features/pool{}".format(i + 1))(x)

    x = nn.Conv2D(filters=classes, kernel_size=1, name="output/final_conv")(x)
    if cls_activ:
        x = nn.LeakyReLU(alpha=0.1, name="output/final_activ")(x)
    x = nn.AvgPool2D(pool_size=avg_pool_size,
                     strides=1,
                     name="output/final_pool")(x)
    x = nn.Flatten()(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 4
def menet(channels,
          init_block_channels,
          side_channels,
          groups,
          in_channels=3,
          in_size=(224, 224),
          classes=1000):
    """
    MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
    https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if K.image_data_format() == 'channels_first' else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = me_init_block(x=input,
                      in_channels=in_channels,
                      out_channels=init_block_channels,
                      name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            downsample = (j == 0)
            ignore_group = (i == 0) and (j == 0)
            x = me_unit(x=x,
                        in_channels=in_channels,
                        out_channels=out_channels,
                        side_channels=side_channels,
                        groups=groups,
                        downsample=downsample,
                        ignore_group=ignore_group,
                        name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 5
def senet(channels,
          init_block_channels,
          cardinality,
          bottleneck_width,
          in_channels=3,
          in_size=(224, 224),
          classes=1000):
    """
    SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality : int
        Number of groups.
    bottleneck_width : int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = senet_init_block(x=input,
                         in_channels=in_channels,
                         out_channels=init_block_channels,
                         name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        identity_conv3x3 = (i != 0)
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = senet_unit(x=x,
                           in_channels=in_channels,
                           out_channels=out_channels,
                           strides=strides,
                           cardinality=cardinality,
                           bottleneck_width=bottleneck_width,
                           identity_conv3x3=identity_conv3x3,
                           name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dropout(rate=0.2, name="output/dropout")(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output/fc")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 6
def transition_block(x, in_channels, out_channels, name="transition_block"):
    """
    DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the
    first unit of each stage.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'transition_block'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor.
    """
    x = pre_conv1x1_block(x=x,
                          in_channels=in_channels,
                          out_channels=out_channels,
                          name=name + "/conv")
    x = nn.AvgPool2D(pool_size=2,
                     strides=2,
                     padding="valid",
                     name=name + "/pool")(x)
    return x
Example no. 7
def preresnet(channels,
              init_block_channels,
              bottleneck,
              conv1_stride,
              in_channels=3,
              in_size=(224, 224),
              classes=1000):
    """
    PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if K.image_data_format() == "channels_first" else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = preres_init_block(x=input,
                          in_channels=in_channels,
                          out_channels=init_block_channels,
                          name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = preres_unit(x=x,
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride,
                            name="features/stage{}/unit{}".format(
                                i + 1, j + 1))
            in_channels = out_channels
    x = preres_activation(x=x, name="features/post_activ")
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 8
def densenet(channels,
             init_block_channels,
             dropout_rate=0.0,
             in_channels=3,
             in_size=(224, 224),
             classes=1000):
    """
    DenseNet model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = preres_init_block(x=input,
                          in_channels=in_channels,
                          out_channels=init_block_channels,
                          name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        if i != 0:
            x = transition_block(x=x,
                                 in_channels=in_channels,
                                 out_channels=(in_channels // 2),
                                 name="features/stage{}/trans{}".format(
                                     i + 1, i + 1))
            in_channels = in_channels // 2
        for j, out_channels in enumerate(channels_per_stage):
            x = dense_unit(x=x,
                           in_channels=in_channels,
                           out_channels=out_channels,
                           dropout_rate=dropout_rate,
                           name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = preres_activation(x=x, name="features/post_activ")
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 9
def mobilenet(channels,
              first_stage_stride,
              in_channels=3,
              in_size=(224, 224),
              classes=1000):
    """
    MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
    https://arxiv.org/abs/1704.04861. Also this class implements FD-MobileNet from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    first_stage_stride : bool
        Whether stride is used at the first stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if K.image_data_format() == 'channels_first' else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    init_block_channels = channels[0][0]
    x = conv3x3_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels,
        strides=2,
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels[1:]):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and ((i != 0) or first_stage_stride) else 1
            x = dws_conv_block(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(
        pool_size=7,
        strides=1,
        name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 10
def aux_clf(in_tensor):
    # GoogLeNet-style auxiliary classifier: 5x5 average pooling with stride 3,
    # a 128-filter 1x1 convolution, a 1024-unit FC layer, heavy dropout, and a
    # 1000-way softmax head.
    avg_pool = layers.AvgPool2D(5, 3)(in_tensor)
    conv = conv1x1(128)(avg_pool)
    flattened = layers.Flatten()(conv)
    dense = layers.Dense(1024, activation='relu')(flattened)
    dropout = layers.Dropout(0.7)(dense)
    out = layers.Dense(1000, activation='softmax')(dropout)
    return out
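
These sizes mirror the auxiliary classifier described in the GoogLeNet paper. A usage sketch, assuming the `conv1x1` helper is in scope and using an illustrative feature-map shape:

feat = layers.Input((14, 14, 512))  # assumed shape of a mid-network feature map
aux_out = aux_clf(feat)             # 1000-way auxiliary prediction head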
Example no. 11
def se_block(x,
             channels,
             reduction=16,
             approx_sigmoid=False,
             round_mid=False,
             activation="relu",
             name="se_block"):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    approx_sigmoid : bool, default False
        Whether to use approximated sigmoid function.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    activation : function or str, default 'relu'
        Activation function or name of activation function.
    name : str, default 'se_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    assert(len(x._keras_shape) == 4)
    mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
    pool_size = x._keras_shape[2:4] if is_channels_first() else x._keras_shape[1:3]

    w = nn.AvgPool2D(
        pool_size=pool_size,
        name=name + "/pool")(x)
    w = conv1x1(
        x=w,
        in_channels=channels,
        out_channels=mid_channels,
        use_bias=True,
        name=name + "/conv1")
    w = get_activation_layer(
        x=w,
        activation=activation,
        name=name + "/activ")
    w = conv1x1(
        x=w,
        in_channels=mid_channels,
        out_channels=channels,
        use_bias=True,
        name=name + "/conv2")
    w = HSigmoid(name=name + "/hsigmoid")(w) if approx_sigmoid else nn.Activation("sigmoid", name=name + "/sigmoid")(w)
    x = nn.multiply([x, w], name=name + "/mul")
    return x
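
For comparison, the same squeeze-and-excitation gating can be written with stock Keras layers; a minimal standalone sketch assuming channels-last data, not the project's helper-based version above:

from tensorflow.keras import layers

def se_gate(x, channels, reduction=16):
    # squeeze: global spatial average -> (batch, channels)
    w = layers.GlobalAveragePooling2D()(x)
    # excite: bottleneck MLP ending in a per-channel sigmoid gate
    w = layers.Dense(channels // reduction, activation="relu")(w)
    w = layers.Dense(channels, activation="sigmoid")(w)
    # rescale the input feature map channel-wise
    return layers.Multiply()([x, layers.Reshape((1, 1, channels))(w)])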
Example no. 12
def resnext(channels,
            init_block_channels,
            cardinality,
            bottleneck_width,
            use_se,
            in_channels=3,
            classes=1000):
    """
    ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
    Also this class implements SE-ResNeXt from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality : int
        Number of groups.
    bottleneck_width : int
        Width of bottleneck block.
    use_se : bool
        Whether to use SE block.
    in_channels : int, default 3
        Number of input channels.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, 224,
                   224) if K.image_data_format() == 'channels_first' else (
                       224, 224, in_channels)
    input = nn.Input(shape=input_shape)

    x = resnext_init_block(x=input,
                           in_channels=in_channels,
                           out_channels=init_block_channels,
                           name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = resnext_unit(x=x,
                             in_channels=in_channels,
                             out_channels=out_channels,
                             strides=strides,
                             cardinality=cardinality,
                             bottleneck_width=bottleneck_width,
                             use_se=use_se,
                             name="features/stage{}/unit{}".format(
                                 i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    return model
Example no. 13
def resnext(channels,
            init_block_channels,
            cardinality,
            bottleneck_width,
            in_channels=3,
            in_size=(224, 224),
            classes=1000):
    """
    ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality : int
        Number of groups.
    bottleneck_width : int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = res_init_block(x=input,
                       in_channels=in_channels,
                       out_channels=init_block_channels,
                       name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = resnext_unit(x=x,
                             in_channels=in_channels,
                             out_channels=out_channels,
                             strides=strides,
                             cardinality=cardinality,
                             bottleneck_width=bottleneck_width,
                             name="features/stage{}/unit{}".format(
                                 i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 14
def squeezenext(channels,
                init_block_channels,
                final_block_channels,
                in_channels=3,
                in_size=(224, 224),
                classes=1000):
    """
    SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if K.image_data_format() == 'channels_first' else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = sqnxt_init_block(x=input,
                         in_channels=in_channels,
                         out_channels=init_block_channels,
                         name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = sqnxt_unit(x=x,
                           in_channels=in_channels,
                           out_channels=out_channels,
                           strides=strides,
                           name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = conv1x1_block(x=x,
                      in_channels=in_channels,
                      out_channels=final_block_channels,
                      use_bias=True,
                      name="features/final_block")
    in_channels = final_block_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 15
def seresnet(channels,
             init_block_channels,
             bottleneck,
             conv1_stride,
             in_channels=3,
             in_size=(224, 224),
             classes=1000):
    """
    SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = res_init_block(x=input,
                       in_channels=in_channels,
                       out_channels=init_block_channels,
                       name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = seres_unit(x=x,
                           in_channels=in_channels,
                           out_channels=out_channels,
                           strides=strides,
                           bottleneck=bottleneck,
                           conv1_stride=conv1_stride,
                           name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 16
def resnet(channels,
           init_block_channels,
           bottleneck,
           conv1_stride,
           use_se,
           in_channels=3,
           classes=1000):
    """
    ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Also this class
    implements SE-ResNet from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    use_se : bool
        Whether to use SE block.
    in_channels : int, default 3
        Number of input channels.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, 224,
                   224) if K.image_data_format() == 'channels_first' else (
                       224, 224, in_channels)
    input = nn.Input(shape=input_shape)

    x = res_init_block(x=input,
                       in_channels=in_channels,
                       out_channels=init_block_channels,
                       name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = res_unit(x=x,
                         in_channels=in_channels,
                         out_channels=out_channels,
                         strides=strides,
                         bottleneck=bottleneck,
                         conv1_stride=conv1_stride,
                         use_se=use_se,
                         name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    return model
Example no. 17
def build(width=400, height=400, depth=3, classes=3):

    #create a sequential model
    model = models.Sequential()

    #define the input dimensions
    input_format = Input(shape=(height, width, depth), name='image_input')

    #use the pretrained model until the last conv-pool block
    vgg_model = VGG16(weights='imagenet',
                      include_top=False,
                      input_tensor=input_format)

    #freeze the layers for training
    for layer in vgg_model.layers:
        layer.trainable = False

    #add the VGG convolutional base model
    model.add(vgg_model)

    #add 3 new convolution layers, then average pooling, then softmax
    #filters = 4096, kernel size = (6,6), stride = 6
    model.add(
        layers.Conv2D(filters=4096,
                      kernel_size=(6, 6),
                      activation='relu',
                      strides=(6, 6)))

    #filters = 4096, kernel size = (1,1), stride = 1
    model.add(
        layers.Conv2D(filters=4096,
                      kernel_size=(1, 1),
                      activation='relu',
                      strides=(1, 1)))

    #filters = 3, kernel size = (1,1), stride = 1
    model.add(
        layers.Conv2D(filters=3,
                      kernel_size=(1, 1),
                      activation='relu',
                      strides=(1, 1)))

    #add avg_pooling layer, pool_size=(2,2)
    model.add(layers.AvgPool2D(pool_size=(2, 2)))

    #flatten the output
    model.add(layers.Flatten())

    #add softmax
    model.add(layers.Dense(units=classes, activation="softmax"))

    return model
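
A usage sketch for the transfer-learning builder above; the imports are assumptions based on the names the snippet uses:

from keras.applications.vgg16 import VGG16  # assumed import
from keras.layers import Input              # assumed import
from keras import layers, models            # assumed imports

model = build()  # 400x400x3 input, frozen VGG16 base, 3-class softmax head
model.compile(optimizer='adam', loss='categorical_crossentropy')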
Example no. 18
def se_block(x,
             channels,
             reduction=16,
             name="se_block"):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    name : str, default 'se_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    assert(len(x._keras_shape) == 4)
    mid_channels = channels // reduction
    pool_size = x._keras_shape[2:4] if is_channels_first() else x._keras_shape[1:3]

    w = nn.AvgPool2D(
        pool_size=pool_size,
        name=name + "/pool")(x)
    w = conv1x1(
        x=w,
        in_channels=channels,
        out_channels=mid_channels,
        use_bias=True,
        name=name + "/conv1")
    w = nn.Activation("relu", name=name + "/relu")(w)
    w = conv1x1(
        x=w,
        in_channels=mid_channels,
        out_channels=channels,
        use_bias=True,
        name=name + "/conv2")
    w = nn.Activation("sigmoid", name=name + "/sigmoid")(w)
    x = nn.multiply([x, w], name=name + "/mul")
    return x
Example no. 19
def inception_v1(in_shape=(224, 224, 3), n_classes=1000, opt='sgd'):
    in_layer = layers.Input(in_shape)

    conv1 = layers.Conv2D(64, 7, strides=2, activation='relu',
                          padding='same')(in_layer)
    #pool1 = layers.MaxPool2D(3, 2, padding='same')(conv1)
    pad1 = layers.ZeroPadding2D()(conv1)
    pool1 = layers.MaxPool2D(3, 2)(pad1)

    conv2 = layers.Conv2D(192, 3, strides=1, activation='relu',
                          padding='same')(pool1)
    pad2 = layers.ZeroPadding2D()(conv2)
    pool2 = layers.MaxPool2D(3, 2)(pad2)

    inception_3a = inception_module(pool2, 64, 96, 128, 16, 32, 32)
    inception_3b = inception_module(inception_3a, 128, 128, 192, 32, 96, 64)
    pad3 = layers.ZeroPadding2D()(inception_3b)
    pool3 = layers.MaxPool2D(3, 2)(pad3)

    inception_4a = inception_module(pool3, 192, 96, 208, 16, 48, 64)
    inception_4b = inception_module(inception_4a, 160, 112, 224, 24, 64, 64)
    inception_4c = inception_module(inception_4b, 128, 128, 256, 24, 64, 64)
    inception_4d = inception_module(inception_4c, 112, 144, 288, 32, 64, 64)
    inception_4e = inception_module(inception_4d, 256, 160, 320, 32, 128, 128)
    pad4 = layers.ZeroPadding2D()(inception_4e)
    pool4 = layers.MaxPool2D(3, 2)(pad4)

    aux0 = aux_clf(inception_4a, n_classes)
    aux1 = aux_clf(inception_4d, n_classes)

    inception_5a = inception_module(pool4, 256, 160, 320, 32, 128, 128)
    inception_5b = inception_module(inception_5a, 384, 192, 384, 48, 128, 128)

    #pad5 = layers.ZeroPadding2D()(inception_5b)
    pool5 = layers.AvgPool2D(7, 1)(inception_5b)
    drop = layers.Dropout(0.4)(pool5)
    flat = layers.Flatten()(drop)

    preds = layers.Dense(n_classes, activation='softmax')(flat)

    model = Model(in_layer, [preds, aux0, aux1])
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    return model
Example no. 20
def se_block(x, channels, reduction=16, name="se_block"):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    name : str, default 'se_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    mid_channels = channels // reduction

    conv1 = conv1x1(out_channels=mid_channels,
                    use_bias=True,
                    name=name + "/conv1")
    relu = nn.Activation('relu', name=name + "/relu")
    conv2 = conv1x1(out_channels=channels, use_bias=True, name=name + "/conv2")
    sigmoid = nn.Activation('sigmoid', name=name + "/sigmoid")

    assert (len(x.shape) == 4)
    pool_size = x.shape[2:4] if K.image_data_format(
    ) == 'channels_first' else x.shape[1:3]
    w = nn.AvgPool2D(pool_size=pool_size, name=name + "/pool")(x)
    w = conv1(w)
    w = relu(w)
    w = conv2(w)
    w = sigmoid(w)
    x = nn.multiply([x, w], name=name + "/mul")
    return x
Example no. 21
def shufflenetv2(channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 in_channels=3,
                 classes=1000):
    """
    ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    in_channels : int, default 3
        Number of input channels.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, 224,
                   224) if K.image_data_format() == 'channels_first' else (
                       224, 224, in_channels)
    input = nn.Input(shape=input_shape)

    x = shuffle_init_block(x=input,
                           in_channels=in_channels,
                           out_channels=init_block_channels,
                           name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            downsample = (j == 0)
            x = shuffle_unit(x=x,
                             in_channels=in_channels,
                             out_channels=out_channels,
                             downsample=downsample,
                             use_se=use_se,
                             use_residual=use_residual,
                             name="features/stage{}/unit{}".format(
                                 i + 1, j + 1))
            in_channels = out_channels
    x = shuffle_conv1x1(x=x,
                        in_channels=in_channels,
                        out_channels=final_block_channels,
                        name="features/final_block")
    in_channels = final_block_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    return model
Example no. 22
def avgpool2d(x, pool_size, strides, padding=0, ceil_mode=False, name=None):
    """
    Average pooling operation for two dimensional (spatial) data.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    pool_size : int or tuple/list of 2 int
        Size of the average pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for the pooling layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    name : str, default None
        Layer name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)

    assert (padding[0] == 0) or (padding[0] == (pool_size[0] - 1) // 2)
    assert (padding[1] == 0) or (padding[1] == (pool_size[1] - 1) // 2)

    padding_ke = "valid" if padding[0] == 0 else "same"

    if K.backend() == "tensorflow":
        if ceil_mode:
            height = int(x.shape[2])
            out_height = float(height + 2 * padding[0] -
                               pool_size[0]) / strides[0] + 1.0
            if math.ceil(out_height) > math.floor(out_height):
                padding = (padding[0] + 1, padding[1])
            width = int(x.shape[3])
            out_width = float(width + 2 * padding[1] -
                              pool_size[1]) / strides[1] + 1.0
            if math.ceil(out_width) > math.floor(out_width):
                padding = (padding[0], padding[1] + 1)

        if (padding[0] > 0) or (padding[1] > 0):
            import tensorflow as tf
            x = nn.Lambda((lambda z: tf.pad(z, [[0, 0], [0, 0],
                                                list(padding),
                                                list(padding)],
                                            mode="REFLECT")
                           ) if is_channels_first() else (
                               lambda z: tf.pad(z, [[0, 0],
                                                    list(padding),
                                                    list(padding), [0, 0]],
                                                mode="REFLECT")))(x)

        x = nn.AvgPool2D(pool_size=pool_size,
                         strides=1,
                         padding="valid",
                         name=name + "/pool")(x)

        if (strides[0] > 1) or (strides[1] > 1):
            x = nn.AvgPool2D(pool_size=1,
                             strides=strides,
                             padding="valid",
                             name=name + "/stride")(x)
        return x

    x = nn.AvgPool2D(pool_size=pool_size,
                     strides=strides,
                     padding=padding_ke,
                     name=name + "/pool")(x)
    return x
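
A worked example of the ceil_mode arithmetic this helper implements: the output size is (H + 2p - k)/s + 1, and when that value is fractional, one extra row/column of padding is added so the ceiling is reached.

import math

h, k, s, p = 8, 3, 2, 0                 # illustrative sizes
out = (h + 2 * p - k) / s + 1           # 3.5, i.e. fractional
print(math.floor(out), math.ceil(out))  # 3 4: ceil_mode pads to reach 4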
Example no. 23
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 groups,
                 downsample,
                 ignore_group,
                 name="shuffle_unit"):
    """
    ShuffleNet unit.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether to do downsampling.
    ignore_group : bool
        Whether to ignore the group value in the first convolution layer.
    name : str, default 'shuffle_unit'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    mid_channels = out_channels // 4

    if downsample:
        out_channels -= in_channels

    identity = x

    x = conv1x1(x=x,
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=(1 if ignore_group else groups),
                name=name + "/compress_conv1")
    x = GluonBatchNormalization(name=name + "/compress_bn1")(x)
    x = nn.Activation("relu", name=name + "/activ")(x)

    x = channel_shuffle_lambda(channels=mid_channels,
                               groups=groups,
                               name=name + "/c_shuffle")(x)

    x = depthwise_conv3x3(x=x,
                          channels=mid_channels,
                          strides=(2 if downsample else 1),
                          name=name + "/dw_conv2")
    x = GluonBatchNormalization(name=name + "/dw_bn2")(x)

    x = conv1x1(x=x,
                in_channels=mid_channels,
                out_channels=out_channels,
                groups=groups,
                name=name + "/expand_conv3")
    x = GluonBatchNormalization(name=name + "/expand_bn3")(x)

    x._keras_shape = tuple([d if d != 0 else None for d in x.shape])

    if downsample:
        identity = nn.AvgPool2D(pool_size=3,
                                strides=2,
                                padding="same",
                                name=name + "/avgpool")(identity)

        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        x = nn.concatenate([x, identity],
                           axis=channel_axis,
                           name=name + "/concat")
    else:
        x = nn.add([x, identity], name=name + "/add")

    x = nn.Activation("relu", name=name + "/final_activ")(x)
    return x
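
The `channel_shuffle_lambda` helper is not shown; a minimal channels-last sketch of the standard ShuffleNet channel-shuffle operation it is assumed to wrap (reshape into groups, transpose, flatten back):

import tensorflow as tf

def channel_shuffle(x, groups):
    # x: (batch, H, W, C) with C divisible by `groups`
    _, h, w, c = x.shape
    x = tf.reshape(x, [-1, h, w, groups, c // groups])
    x = tf.transpose(x, [0, 1, 2, 4, 3])  # interleave channels across groups
    return tf.reshape(x, [-1, h, w, c])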
Example no. 24
def mobilenetv2(channels,
                init_block_channels,
                final_block_channels,
                in_channels=3,
                in_size=(224, 224),
                classes=1000):
    """
    MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = conv3x3_block(x=input,
                      in_channels=in_channels,
                      out_channels=init_block_channels,
                      strides=2,
                      activation="relu6",
                      name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            expansion = (i != 0) or (j != 0)
            x = linear_bottleneck(x=x,
                                  in_channels=in_channels,
                                  out_channels=out_channels,
                                  strides=strides,
                                  expansion=expansion,
                                  name="features/stage{}/unit{}".format(
                                      i + 1, j + 1))
            in_channels = out_channels
    x = conv1x1_block(x=x,
                      in_channels=in_channels,
                      out_channels=final_block_channels,
                      activation="relu6",
                      name="features/final_block")
    in_channels = final_block_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = conv1x1(x=x,
                in_channels=in_channels,
                out_channels=classes,
                use_bias=False,
                name="output")
    # x = nn.Flatten()(x)
    x = flatten(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 25
def mnasnet_model(channels,
                  init_block_channels,
                  final_block_channels,
                  kernels3,
                  exp_factors,
                  se_factors,
                  init_block_use_skip,
                  final_block_use_skip,
                  in_channels=3,
                  in_size=(224, 224),
                  classes=1000):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Number of output channels for the initial unit.
    final_block_channels : list of 2 int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    init_block_use_skip : bool
        Whether to use skip connection in the initial unit.
    final_block_use_skip : bool
        Whether to use skip connection in the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = mnas_init_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels[1],
        mid_channels=init_block_channels[0],
        use_skip=init_block_use_skip,
        name="features/init_block")
    in_channels = init_block_channels[1]
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) else 1
            use_kernel3 = kernels3[i][j] == 1
            exp_factor = exp_factors[i][j]
            se_factor = se_factors[i][j]
            x = dws_exp_se_res_unit(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_kernel3=use_kernel3,
                exp_factor=exp_factor,
                se_factor=se_factor,
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = mnas_final_block(
        x=x,
        in_channels=in_channels,
        out_channels=final_block_channels[1],
        mid_channels=final_block_channels[0],
        use_skip=final_block_use_skip,
        name="features/final_block")
    in_channels = final_block_channels[1]
    x = nn.AvgPool2D(
        pool_size=7,
        strides=1,
        name="features/final_pool")(x)

    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
Example no. 26
def Unet(GAUSSIAN_NOISE=0.1,
         UPSAMPLE_MODE='SIMPLE',
         NET_SCALING=(1, 1),
         EDGE_CROP=16):
    def upsample_conv(filters, kernel_size, strides, padding):
        return layers.Conv2DTranspose(filters,
                                      kernel_size,
                                      strides=strides,
                                      padding=padding)

    def upsample_simple(filters, kernel_size, strides, padding):
        return layers.UpSampling2D(strides)

    if UPSAMPLE_MODE == 'DECONV':
        upsample = upsample_conv
    else:
        upsample = upsample_simple

    input_img = layers.Input((768, 768, 3), name='RGB_Input')
    pp_in_layer = input_img

    if NET_SCALING is not None:
        pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)

    pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
    pp_in_layer = layers.BatchNormalization()(pp_in_layer)

    c1 = layers.Conv2D(16, (3, 3), activation='relu',
                       padding='same')(pp_in_layer)
    c1 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c1)
    p1 = layers.MaxPooling2D((2, 2))(c1)

    c2 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p1)
    c2 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c2)
    p2 = layers.MaxPooling2D((2, 2))(c2)

    c3 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(p2)
    c3 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c3)
    p3 = layers.MaxPooling2D((2, 2))(c3)

    c4 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(p3)
    c4 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c4)
    p4 = layers.MaxPooling2D(pool_size=(2, 2))(c4)

    c5 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(p4)
    c5 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(c5)

    u6 = upsample(128, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = layers.concatenate([u6, c4])
    c6 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(u6)
    c6 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c6)

    u7 = upsample(64, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = layers.concatenate([u7, c3])
    c7 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(u7)
    c7 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c7)

    u8 = upsample(32, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = layers.concatenate([u8, c2])
    c8 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u8)
    c8 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c8)

    u9 = upsample(16, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = layers.concatenate([u9, c1], axis=3)
    c9 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u9)
    c9 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c9)

    d = layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
    # d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
    # d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
    if NET_SCALING is not None:
        d = layers.UpSampling2D(NET_SCALING)(d)

    seg_model = models.Model(inputs=[input_img], outputs=[d])
    seg_model.summary()
    return seg_model
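
A minimal usage sketch, with the imports the snippet is assumed to rely on:

from tensorflow.keras import layers, models  # assumed imports

seg_model = Unet(NET_SCALING=None)  # None skips the AvgPool2D/UpSampling2D rescaling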
Example no. 27
# Build U-Net model
def upsample_conv(filters, kernel_size, strides, padding):
    return layers.Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding)
def upsample_simple(filters, kernel_size, strides, padding):
    return layers.UpSampling2D(strides)

if UPSAMPLE_MODE=='DECONV':
    upsample=upsample_conv
else:
    upsample=upsample_simple
    
input_img = layers.Input(t_x.shape[1:], name='RGB_Input')
pp_in_layer = input_img

if NET_SCALING is not None:
    pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)

pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
pp_in_layer = layers.BatchNormalization()(pp_in_layer)

c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(pp_in_layer)
c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
p1 = layers.MaxPooling2D((2, 2))(c1)

c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(p1)
c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
p2 = layers.MaxPooling2D((2, 2))(c2)

c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p2)
c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
p3 = layers.MaxPooling2D((2, 2))(c3)
Example no. 28
def igcv3(channels,
          init_block_channels,
          final_block_channels,
          in_channels=3,
          in_size=(224, 224),
          classes=1000):
    """
    IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if K.image_data_format() == 'channels_first' else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = conv3x3_block(x=input,
                      in_channels=in_channels,
                      out_channels=init_block_channels,
                      strides=2,
                      activation="relu6",
                      name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            expansion = (i != 0) or (j != 0)
            x = inv_res_unit(x=x,
                             in_channels=in_channels,
                             out_channels=out_channels,
                             strides=strides,
                             expansion=expansion,
                             name="features/stage{}/unit{}".format(
                                 i + 1, j + 1))
            in_channels = out_channels
    x = conv1x1_block(x=x,
                      in_channels=in_channels,
                      out_channels=final_block_channels,
                      activation="relu6",
                      name="features/final_block")
    in_channels = final_block_channels
    x = nn.AvgPool2D(pool_size=7, strides=1, name="features/final_pool")(x)

    x = nn.Flatten()(x)
    x = nn.Dense(units=classes, input_dim=in_channels, name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model

Example no. 30
                     padding='same',
                     activation='relu',
                     **init_reg)(lyrout2)
lyrout4 = lyr.MaxPool2D(3, 2, 'same')(lyrout3)
lyrout5 = custom.inception(lyrout4, 64, 96, 128, 16, 32, 32, **init_reg)
lyrout6 = custom.inception(lyrout5, 128, 128, 192, 32, 96, 64, **init_reg)
lyrout7 = lyr.MaxPool2D(3, 2, 'same')(lyrout6)
lyrout8 = custom.inception(lyrout7, 192, 96, 208, 16, 48, 64, **init_reg)
lyrout9 = custom.inception(lyrout8, 160, 112, 224, 24, 64, 64, **init_reg)
lyrout10 = custom.inception(lyrout9, 128, 128, 256, 24, 64, 64, **init_reg)
lyrout11 = custom.inception(lyrout10, 112, 144, 288, 32, 64, 64, **init_reg)
lyrout12 = custom.inception(lyrout11, 256, 160, 320, 32, 128, 128, **init_reg)
lyrout13 = lyr.MaxPool2D(3, 2, 'same')(lyrout12)
lyrout14 = custom.inception(lyrout13, 256, 160, 320, 32, 128, 128, **init_reg)
lyrout15 = custom.inception(lyrout14, 384, 192, 384, 48, 128, 128, **init_reg)
lyrout16 = lyr.AvgPool2D(7, 1, 'valid')(lyrout15)
lyrout17 = lyr.Flatten()(lyrout16)
lyrout18 = lyr.Dropout(0.4)(lyrout17)
out = lyr.Dense(num_classes, activation='softmax', **init_reg)(lyrout18)

model = mod.Model(inputs=input, outputs=out)
"""
Optimizer, Loss, & Metrics

Using zero built in decay and relying exclusively on the heuristic
used in the original paper.
"""
optimizer = opt.Adam(0.001)
# The line below is what was used in the original ResNet paper. Adam is the recommended approach now, though.
# We use it for GoogLeNet because its paper only specifies momentum=0.9, not the
# initial learning rate.