Ejemplo n.º 1
0
def grouped_convolution(y, nb_channels, _strides):
    """ResNeXt-style grouped octave convolution.

    Splits the channels of ``y`` into ``cardinality`` groups (module-level
    setting), convolves each group independently with a 3x3 octave conv,
    and concatenates the per-group outputs along the channel axis.

    # Arguments
        y: input tensor from a previous layer.
        nb_channels: total output channels; must be divisible by the
            module-level ``cardinality``.
        _strides: strides forwarded to every OctaveConv2D.

    # Returns
        The convolved output tensor(s).
    """
    # When `cardinality` == 1 this is just a standard (octave) convolution.
    if cardinality == 1:
        return OctaveConv2D(nb_channels,
                            kernel_size=(3, 3),
                            strides=_strides,
                            ratio_out=0.5)(y)

    assert not nb_channels % cardinality
    _d = nb_channels // cardinality

    # In a grouped convolution layer, input and output channels are divided
    # into `cardinality` groups, and convolutions are separately performed
    # within each group.
    groups = []
    for j in range(cardinality):
        # Bind `j` and `_d` as default arguments: Keras serializes the
        # Lambda's function, and a late-binding closure would make every
        # reloaded slice use the *final* value of `j`.
        group = Lambda(lambda z, j=j, _d=_d:
                       z[:, :, :, j * _d:j * _d + _d])(y)
        groups.append(
            OctaveConv2D(_d,
                         kernel_size=(3, 3),
                         strides=_strides,
                         ratio_out=0.5)(group))

    # The grouped convolutional layer concatenates the group outputs.
    return concatenate(groups)
Ejemplo n.º 2
0
def residual_block(y,
                   nb_channels_in,
                   nb_channels_out,
                   _strides=(1, 1),
                   _project_shortcut=False):
    """ResNeXt bottleneck residual block over an octave [high, low] pair.

    The network is a stack of these blocks, following two rules:
    - blocks producing spatial maps of the same size share the same
      hyper-parameters (width and filter sizes);
    - every time the spatial map is down-sampled by 2, the block width
      is multiplied by 2.
    """
    identity = y

    # Bottleneck entry: economical 1x1 octave convolution.
    y = OctaveConv2D(nb_channels_in,
                     kernel_size=(1, 1),
                     strides=(1, 1),
                     ratio_out=0.5)(y)
    y = add_common_layers(y)

    # ResNeXt grouped 3x3 stage (identical to ResNet when `cardinality` == 1).
    y = grouped_convolution(y, nb_channels_in, _strides=_strides)
    y = add_common_layers(y)

    # Bottleneck exit: 1x1 octave convolution back up to `nb_channels_out`.
    y = OctaveConv2D(nb_channels_out,
                     kernel_size=(1, 1),
                     strides=(1, 1),
                     ratio_out=0.5)(y)
    # Batch normalization after aggregating the transformations and before
    # adding to the shortcut.
    y = [BatchNormalization()(branch) for branch in y]

    # Identity shortcut when input and output dimensions match; otherwise a
    # 1x1 projection (with the block's stride) matches the dimensions.
    if _project_shortcut or _strides != (1, 1):
        identity = OctaveConv2D(nb_channels_out,
                                kernel_size=(1, 1),
                                strides=_strides,
                                ratio_out=0.5)(identity)
        identity = [BatchNormalization()(branch) for branch in identity]

    # Shortcut addition on each frequency path, then the block's activation
    # (applied after the addition, unlike the inner batch norms).
    y = [add([skip, branch]) for skip, branch in zip(identity, y)]
    return [LeakyReLU()(branch) for branch in y]
Ejemplo n.º 3
0
 def test_fit_stride(self):
     """The model should train when the first octave conv uses uneven strides."""
     net_in = Input(shape=(32, 32, 3))
     hi, lo = OctaveConv2D(13, kernel_size=3, strides=(1, 2))(net_in)
     hi = MaxPool2D()(hi)
     lo = MaxPool2D()(lo)
     merged = OctaveConv2D(5, kernel_size=3, ratio_out=0.0)([hi, lo])
     net_out = Dense(units=2, activation='softmax')(Flatten()(merged))
     model = Model(inputs=net_in, outputs=net_out)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Ejemplo n.º 4
0
 def test_make_dual_lambda(self):
     """octave_dual with a layer factory should pool both frequency paths."""
     net_in = Input(shape=(32, 32, 3))
     x = OctaveConv2D(13, kernel_size=3)(net_in)
     x = octave_dual(x, lambda: MaxPool2D())
     x = OctaveConv2D(7, kernel_size=3)(x)
     x = octave_dual(x, lambda: MaxPool2D())
     x = OctaveConv2D(5, kernel_size=3, ratio_out=0.0)(x)
     net_out = Dense(units=2, activation='softmax')(Flatten()(x))
     model = Model(inputs=net_in, outputs=net_out)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Ejemplo n.º 5
0
def oct_srcnn():
    """Build a small SRCNN-style model from octave convolutions.

    Two octave conv + pool stages, then a final octave conv with
    ratio_out=0.0 that folds the low-frequency path back into a single
    full-resolution output tensor.
    """
    image = Input(shape=(256, 256, 3))
    hi, lo = OctaveConv2D(filters=64, kernel_size=3)(image)

    hi, lo = Activation('relu')(hi), Activation('relu')(lo)
    hi, lo = MaxPool2D()(hi), MaxPool2D()(lo)
    hi, lo = OctaveConv2D(filters=32, kernel_size=3)([hi, lo])

    hi, lo = Activation('relu')(hi), Activation('relu')(lo)
    hi, lo = MaxPool2D()(hi), MaxPool2D()(lo)
    merged = OctaveConv2D(filters=31, kernel_size=3, ratio_out=0.0)([hi, lo])

    return Model(inputs=image, outputs=merged)
Ejemplo n.º 6
0
 def test_raise_dimension_specified(self):
     """A `None` channel dimension must raise ValueError for either input."""
     with self.assertRaises(ValueError):
         net_in = Input(shape=(32, 32, None))
         net_out = OctaveConv2D(13, kernel_size=3, ratio_out=0.0)(net_in)
         model = Model(inputs=net_in, outputs=net_out)
         model.compile(optimizer='adam',
                       loss='sparse_categorical_crossentropy')
     with self.assertRaises(ValueError):
         hi_in = Input(shape=(32, 32, 3))
         lo_in = Input(shape=(32, 32, None))
         net_out = OctaveConv2D(13, kernel_size=3,
                                ratio_out=0.0)([hi_in, lo_in])
         model = Model(inputs=[hi_in, lo_in], outputs=net_out)
         model.compile(optimizer='adam',
                       loss='sparse_categorical_crossentropy')
Ejemplo n.º 7
0
 def test_fit_lower_output(self):
     """The model should train when only the low path is emitted (ratio_out=1.0)."""
     net_in = keras.layers.Input(shape=(32, 32, 3))
     hi, lo = OctaveConv2D(13, kernel_size=3)(net_in)
     hi = keras.layers.MaxPool2D()(hi)
     lo = keras.layers.MaxPool2D()(lo)
     hi, lo = OctaveConv2D(7, kernel_size=3)([hi, lo])
     hi = keras.layers.MaxPool2D()(hi)
     lo = keras.layers.MaxPool2D()(lo)
     low_only = OctaveConv2D(5, kernel_size=3, ratio_out=1.0)([hi, lo])
     features = keras.layers.Flatten()(low_only)
     net_out = keras.layers.Dense(units=2, activation='softmax')(features)
     model = keras.models.Model(inputs=net_in, outputs=net_out)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Ejemplo n.º 8
0
 def test_raise_octave_divisible(self):
     """A spatial size not divisible by `octave` must raise ValueError."""
     with self.assertRaises(ValueError):
         net_in = Input(shape=(32, 32, 3))
         net_out = OctaveConv2D(13, kernel_size=3, octave=5,
                                ratio_out=0.0)(net_in)
         model = Model(inputs=net_in, outputs=net_out)
         model.compile(optimizer='adam',
                       loss='sparse_categorical_crossentropy')
Ejemplo n.º 9
0
def _bottleneck(inputs, filters, kernel, t, alpha, s, r=False):
    """Bottleneck
    This function defines a basic bottleneck structure over an octave
    (high, low) tensor pair.

    # Arguments
        inputs: [high, low] tensor pair from a previous octave layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
            NOTE(review): currently unused — the octave conv below
            hard-codes a (3, 3) kernel; confirm whether `kernel` should
            be forwarded.
        t: Integer, expansion factor.
            t is always applied to the input size.
            NOTE(review): currently unused in this implementation.
        s: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height.
            NOTE(review): currently unused — the strided conv is
            commented out below.
        alpha: Integer, width multiplier.
        r: Boolean, whether to use the residual (skip) connection.

    # Returns
        (high, low) output tensor pair.
    """
    high, low = inputs

    if not r:
        # Project the skip connections with 1x1 convs so their widths match
        # the octave conv outputs.
        # NOTE(review): with the default alpha == 1 the high path is given
        # int(filters * (1 - alpha)) == 0 filters — this looks like it
        # should use the octave ratio (e.g. 0.5) rather than alpha; confirm.
        skip_high = Conv2D(int(filters * (1 - alpha)), 1)(high)
        skip_high = BatchNormalization()(skip_high)
        skip_high = Activation(relu6)(skip_high)

        skip_low = Conv2D(int(filters * alpha), 1)(low)
        skip_low = BatchNormalization()(skip_low)
        skip_low = Activation(relu6)(skip_low)
    else:
        skip_high, skip_low = high, low

    # Main path: 3x3 octave conv, then BN + ReLU6 on each frequency path.
    high, low = OctaveConv2D(filters=filters, kernel_size=(3, 3))([high, low])
    high = BatchNormalization()(high)
    high = Activation(relu6)(high)
    low = BatchNormalization()(low)
    low = Activation(relu6)(low)

    # high, low = OctaveConv2D(filters=filters, kernel_size=(3, 3), strides=(s, s))([high, low])
    # high = BatchNormalization()(high)
    # high = Activation(relu6)(high)
    # low = BatchNormalization()(low)
    # low = Activation(relu6)(low)

    if r:
        high = Add()([high, skip_high])
        low = Add()([low, skip_low])

    return high, low
Ejemplo n.º 10
0
 def test_fit_channels_first(self):
     """channels_first fitting — requires GPU support.

     Previously this returned a string, which made the test silently pass;
     skipTest reports the skip explicitly in the test run.
     """
     self.skipTest('The test needs GPU support')
     inputs = keras.layers.Input(shape=(3, 32, 32))
     high, low = OctaveConv2D(13,
                              kernel_size=3,
                              data_format='channels_first')(inputs)
     high = keras.layers.MaxPool2D(data_format='channels_first')(high)
     low = keras.layers.MaxPool2D(data_format='channels_first')(low)
     high, low = OctaveConv2D(7,
                              kernel_size=3,
                              data_format='channels_first')([high, low])
     high = keras.layers.MaxPool2D(data_format='channels_first')(high)
     low = keras.layers.MaxPool2D(data_format='channels_first')(low)
     conv = OctaveConv2D(5,
                         kernel_size=3,
                         ratio_out=0.0,
                         data_format='channels_first')([high, low])
     flatten = keras.layers.Flatten()(conv)
     outputs = keras.layers.Dense(units=2, activation='softmax')(flatten)
     model = keras.models.Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model, data_format='channels_first')
Ejemplo n.º 11
0
# Load Fashion-MNIST: 28x28 grayscale images; 10 classes (see Dense below).
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

# Scale pixels to [0, 1] and append a channel axis -> (n, 28, 28, 1).
x_train = np.expand_dims(x_train.astype(K.floatx()) / 255, axis=-1)
x_test = np.expand_dims(x_test.astype(K.floatx()) / 255, axis=-1)

# Reshape labels to column vectors (n, 1) for sparse categorical loss.
y_train, y_test = np.expand_dims(y_train, axis=-1), np.expand_dims(y_test,
                                                                   axis=-1)

# Hold out the last 10% of the training set for validation.
train_num = round(x_train.shape[0] * 0.9)
x_train, x_valid = x_train[:train_num, ...], x_train[train_num:, ...]
y_train, y_valid = y_train[:train_num, ...], y_train[train_num:, ...]

# Octave Conv classifier: two dual-path octave convs, then a final octave
# conv with ratio_out=0.0 that folds the low-frequency path back into a
# single high-resolution tensor before the dense head.
inputs = Input(shape=(28, 28, 1))
normal = BatchNormalization()(inputs)
high, low = OctaveConv2D(64, kernel_size=3)(normal)
high, low = MaxPool2D()(high), MaxPool2D()(low)
high, low = OctaveConv2D(32, kernel_size=3)([high, low])
conv = OctaveConv2D(16, kernel_size=3, ratio_out=0.0)([high, low])
pool = MaxPool2D()(conv)
flatten = Flatten()(pool)
normal = BatchNormalization()(flatten)
dropout = Dropout(rate=0.4)(normal)
outputs = Dense(units=10, activation='softmax')(dropout)
model = Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
Ejemplo n.º 12
0
def OctaveMobileNetv2(input_shape, k, alpha=1):
    """MobileNetv2
    This function defines a MobileNetv2 architecture built from octave
    convolutions.

    # Arguments
        input_shape: An integer or tuple/list of 3 integers, shape
            of input tensor.
        k: Integer, number of classes.
        alpha: Integer, width multiplier, better in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4].

    # Returns
        MobileNetv2 model.
    """
    inputs = Input(shape=input_shape)
    # Feed the normalized input into the stem (previously `normal` was
    # computed but never used and the stem consumed `inputs` directly).
    normal = BatchNormalization()(inputs)
    first_filters = _make_divisible(32 * alpha, 8)
    high, low = OctaveConv2D(first_filters, (3, 3), strides=2)(normal)

    # Inverted-residual stages as (filters, strides, repeats), matching the
    # seven hand-written calls this loop replaces.
    stages = [
        (16, 1, 1),
        (24, 2, 2),
        (32, 2, 3),
        (64, 2, 4),
        (96, 1, 3),
        (160, 2, 3),
        (320, 1, 1),
    ]
    for filters, strides, repeats in stages:
        high, low = _inverted_residual_block([high, low],
                                             filters, (3, 3),
                                             t=6,
                                             alpha=alpha,
                                             strides=strides,
                                             n=repeats)

    if alpha > 1.0:
        last_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_filters = 1280

    # 1x1 octave conv with ratio_out=0.0 folds the low-frequency path back
    # into a single full-resolution tensor.
    conv = OctaveConv2D(last_filters, kernel_size=1,
                        ratio_out=0.0)([high, low])
    flatten = Flatten()(conv)
    normal = BatchNormalization()(flatten)
    dropout = Dropout(rate=0.4)(normal)
    # Classification head over `k` classes (was hard-coded to 10, silently
    # ignoring the `k` argument).
    outputs = Dense(units=k, activation='softmax')(dropout)

    return Model(inputs, outputs)
def octHu_PageScan():
    """Build a U-Net-style octave-convolution segmentation model.

    Reads the module-level settings __DEF_HEIGHT, __DEF_WIDTH,
    input_channels, init_channels and learning_rate; compiles with the
    Dice loss/metric.

    # Returns
        A compiled keras Model.
    """

    def _oconv(filters, name):
        # Every encoder/decoder conv shares these hyper-parameters; only
        # the width and the layer name change.
        return OctaveConv2D(filters=filters,
                            kernel_size=3,
                            ratio_out=0.5,
                            activation='relu',
                            kernel_initializer='he_uniform',
                            name=name)

    def _up_concat(x, skip):
        # Upsample both frequency paths by 2 and concatenate each with the
        # matching encoder output.
        up = octUpsize(x, 2)
        return [Concatenate()([up[0], skip[0]]),
                Concatenate()([up[1], skip[1]])]

    inputs = Input(shape=(__DEF_HEIGHT, __DEF_WIDTH, input_channels))

    # Encoder: double conv + pool, doubling the width at each level.
    conv_1 = _oconv(init_channels, 'octave_conv_1')(inputs)
    conv_1 = _oconv(init_channels, 'octave_conv_2')(conv_1)
    pool_1 = octPooling('max', 2, 2, 'same', 'oct', '/avg_pool', conv_1)

    conv_2 = _oconv(init_channels * 2, 'octave_conv_3')(pool_1)
    conv_2 = _oconv(init_channels * 2, 'octave_conv_4')(conv_2)
    pool_2 = octPooling('max', 2, 2, 'same', 'oct', '/avg_pool_2', conv_2)

    conv_3 = _oconv(init_channels * 4, 'octave_conv_5')(pool_2)
    conv_3 = _oconv(init_channels * 4, 'octave_conv_6')(conv_3)
    pool_3 = octPooling('max', 2, 2, 'same', 'oct', '/avg_pool_3', conv_3)

    conv_4 = _oconv(init_channels * 8, 'octave_conv_7')(pool_3)
    conv_4 = _oconv(init_channels * 8, 'octave_conv_8')(conv_4)
    pool_4 = octPooling('max', 2, 2, 'same', 'oct', '/avg_pool_4', conv_4)

    # Bottleneck.
    conv_5 = _oconv(init_channels * 16, 'octave_conv_9')(pool_4)
    conv_5 = _oconv(init_channels * 16, 'octave_conv_10')(conv_5)

    # Decoder: upsample + skip concat, then double conv, halving the width.
    up_6 = _up_concat(conv_5, conv_4)
    conv_6 = _oconv(init_channels * 8, 'octave_conv_11')(up_6)
    conv_6 = _oconv(init_channels * 8, 'octave_conv_12')(conv_6)

    up_7 = _up_concat(conv_6, conv_3)
    conv_7 = _oconv(init_channels * 4, 'octave_conv_13')(up_7)
    conv_7 = _oconv(init_channels * 4, 'octave_conv_14')(conv_7)

    up_8 = _up_concat(conv_7, conv_2)
    conv_8 = _oconv(init_channels * 2, 'octave_conv_15')(up_8)
    conv_8 = _oconv(init_channels * 2, 'octave_conv_16')(conv_8)

    up_9 = _up_concat(conv_8, conv_1)
    conv_9 = _oconv(init_channels, 'octave_conv_17')(up_9)
    conv_9 = _oconv(init_channels, 'octave_conv_18')(conv_9)

    # Head: 1x1 conv with ratio_out=0 folds the low-frequency path back
    # into a single full-resolution sigmoid map.
    conv_10 = OctaveConv2D(filters=input_channels,
                           ratio_out=0,
                           kernel_size=1,
                           activation='sigmoid',
                           kernel_initializer='he_uniform',
                           name='output')(conv_9)

    model = Model(inputs=inputs, outputs=conv_10)
    model.compile(optimizer=Adam(lr=learning_rate),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])

    return model
Ejemplo n.º 14
0
def o_unet(pretrained_weights=None, input_size=(800, 600, 1)):
    """Build an octave-convolution U-Net.

    # Arguments
        pretrained_weights: kept for interface compatibility; not used here.
        input_size: (height, width, channels) of the input image.

    # Returns
        A keras Model mapping the input image to a 1-channel sigmoid map.
    """

    def _bn_relu(t):
        # BatchNorm followed by ReLU on a single tensor.
        return layers.Activation("relu")(layers.BatchNormalization()(t))

    def _oct_conv(high, low, filters, **conv_kwargs):
        # Octave conv on the (high, low) pair; each path gets BN + ReLU,
        # high path first (same order as the original inline code).
        high, low = OctaveConv2D(filters, **conv_kwargs)([high, low])
        return _bn_relu(high), _bn_relu(low)

    def _pool(high, low):
        # 2x2 max pooling on both frequency paths.
        return layers.MaxPooling2D(2)(high), layers.MaxPooling2D(2)(low)

    def _up_concat(high, low, skip_high, skip_low, filters):
        # Transposed octave conv (stride 2) with BN + ReLU, then channel
        # concatenation with the matching encoder outputs (skip first).
        up_high, up_low = _oct_conv(high, low, filters,
                                    use_transpose=True, strides=(2, 2))
        return (concatenate([skip_high, up_high], axis=3),
                concatenate([skip_low, up_low], axis=3))

    inputs = Input(input_size)
    # The low-frequency path enters at half resolution.
    low = layers.AveragePooling2D(2)(inputs)

    # Encoder: double conv then pool, doubling the width at each level.
    high1, low1 = _oct_conv(inputs, low, 64)
    high1, low1 = _oct_conv(high1, low1, 64)
    pool1high, pool1low = _pool(high1, low1)

    high2, low2 = _oct_conv(pool1high, pool1low, 128)
    high2, low2 = _oct_conv(high2, low2, 128)
    pool2high, pool2low = _pool(high2, low2)

    high3, low3 = _oct_conv(pool2high, pool2low, 256)
    high3, low3 = _oct_conv(high3, low3, 256)
    pool3high, pool3low = _pool(high3, low3)

    high4, low4 = _oct_conv(pool3high, pool3low, 512)
    high4, low4 = _oct_conv(high4, low4, 512)
    pool4high, pool4low = _pool(high4, low4)

    # Bottleneck with dropout on both paths after each conv.
    high5, low5 = _oct_conv(pool4high, pool4low, 1024)
    high5, low5 = Dropout(0.4)(high5), Dropout(0.4)(low5)
    high5, low5 = _oct_conv(high5, low5, 1024)
    high5, low5 = Dropout(0.4)(high5), Dropout(0.4)(low5)

    # Decoder: upsample + skip concat, then double conv, halving the width.
    merge6high, merge6low = _up_concat(high5, low5, high4, low4, 512)
    high6, low6 = _oct_conv(merge6high, merge6low, 512)
    high6, low6 = _oct_conv(high6, low6, 512)

    merge7high, merge7low = _up_concat(high6, low6, high3, low3, 256)
    high7, low7 = _oct_conv(merge7high, merge7low, 256)
    high7, low7 = _oct_conv(high7, low7, 256)

    merge8high, merge8low = _up_concat(high7, low7, high2, low2, 128)
    high8, low8 = _oct_conv(merge8high, merge8low, 128)
    high8, low8 = _oct_conv(high8, low8, 128)

    merge9high, merge9low = _up_concat(high8, low8, high1, low1, 64)
    high9, low9 = _oct_conv(merge9high, merge9low, 64)
    high9, low9 = _oct_conv(high9, low9, 64)

    # Fold the low path back into the high path and emit the mask.
    conv9 = OctaveConv2D(32, ratio_out=0.0)([high9, low9])
    conv9 = layers.Activation("sigmoid")(conv9)
    conv10 = layers.Conv2D(1, 1, activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    return model