def build_model(self):
        print('\n----------BUILD MODEL----------\n')
        inputs = Input(shape=(IMG_WIDTH, IMG_HEIGHT, IMG_CHANNEL))

        # ----------Block 1-----------
        print('\n---Block 1---')
        x = Conv2D(filters=32, kernel_size=(3, 3),
                   padding='same', activation='relu')(inputs)
        print('Conv2D:', x.get_shape().as_list())
        x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
        print('MaxPool2D:', x.get_shape().as_list())

        # ----------Block 2-----------
        print('\n---Block 2---')
        x = Conv2D(filters=64, kernel_size=(3, 3),
                   padding='same', activation='relu')(x)
        print('Conv2D:', x.get_shape().as_list())
        x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
        print('MaxPool2D:', x.get_shape().as_list())
        x = BatchNormalization()(x)

        # ----------Block 3-----------
        print('\n---Block 3---')
        x = Conv2D(filters=64, kernel_size=(3, 3),
                   padding='same', activation='relu')(x)
        print('Conv2D:', x.get_shape().as_list())
        x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
        print('MaxPool2D:', x.get_shape().as_list())

        # ----------Merge 2 CNNs----------
        print('\n---Merge 2 CNNs---')
        detector = x
        detector_shape = detector.get_shape().as_list()
        extractor = x
        extractor_shape = extractor.get_shape().as_list()

        detector = Reshape([detector_shape[1] * detector_shape[2], detector_shape[3]])(detector)
        print('Detector:', detector.get_shape().as_list())
        extractor = Reshape([extractor_shape[1] * extractor_shape[2], extractor_shape[3]])(extractor)
        print('Extractor:', extractor.get_shape().as_list())

        bcnn = Lambda(_outer_product)([detector, extractor])
        print('Outer product:', bcnn.get_shape().as_list())

        bcnn = Reshape([detector_shape[3] * extractor_shape[3]])(bcnn)
        print('Reshape:', bcnn.get_shape().as_list())
        bcnn = Lambda(_signed_sqrt)(bcnn)
        print('Signed square root:', bcnn.get_shape().as_list())
        bcnn = Lambda(_l2_normalise)(bcnn)
        print('L2 normalisation:', bcnn.get_shape().as_list())

        # ----------Fully Connected----------
        bcnn = Dense(units=N_CLASSES, activation='softmax')(bcnn)
        print('Softmax:', bcnn.get_shape().as_list())

        bcnn_model = Model(inputs=[inputs], outputs=[bcnn])

        return bcnn_model
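# The Lambda helpers used above (_outer_product, _signed_sqrt, _l2_normalise)
# are not part of this snippet. A minimal sketch of the standard bilinear-CNN
# definitions, assuming a TensorFlow backend (not the original author's code):
import tensorflow as tf

def _outer_product(inputs):
    # Batched outer product of the (batch, locations, channels) detector and
    # extractor features, averaged over the spatial locations.
    detector, extractor = inputs
    phi = tf.einsum('blc,bld->bcd', detector, extractor)
    return phi / tf.cast(tf.shape(detector)[1], tf.float32)

def _signed_sqrt(x):
    # Signed square root compresses large activations while keeping the sign.
    return tf.sign(x) * tf.sqrt(tf.abs(x) + 1e-9)

def _l2_normalise(x):
    # L2-normalise each pooled bilinear feature vector.
    return tf.nn.l2_normalize(x, axis=-1)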
Example #2
def AttentionResNet92(shape=(224, 224, 3),
                      n_channels=64,
                      n_classes=100,
                      dropout=0,
                      regularization=0.01):
    """
    Attention-92 ResNet
    https://arxiv.org/abs/1704.06904
    """
    regularizer = l2(regularization)

    input_ = Input(shape=shape)
    x = Conv2D(n_channels, (7, 7), strides=(2, 2),
               padding='same')(input_)  # 112x112
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)  # 56x56

    x = residual_block(x, output_channels=n_channels * 4)  # 56x56
    x = attention_block(x, encoder_depth=3)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 8, stride=2)  # 28x28
    x = attention_block(x, encoder_depth=2)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=2)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 16, stride=2)  # 14x14
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 32, stride=2)  # 7x7
    x = residual_block(x, output_channels=n_channels * 32)
    x = residual_block(x, output_channels=n_channels * 32)

    pool_size = (x.get_shape()[1].value, x.get_shape()[2].value)
    x = AveragePooling2D(pool_size=pool_size, strides=(1, 1))(x)
    x = Flatten()(x)
    if dropout:
        x = Dropout(dropout)(x)
    output = Dense(n_classes,
                   kernel_regularizer=regularizer,
                   activation='softmax')(x)

    model = Model(input_, output)
    return model
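# Hypothetical usage of the builder above; the compile settings below are
# illustrative assumptions, not part of the original example.
model = AttentionResNet92(shape=(224, 224, 3), n_classes=100, dropout=0.2)
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()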
Example #3
def f(x):
    # Stack Conv2D + BatchNormalization blocks as specified by the filters,
    # kernels and strides sequences captured from the enclosing scope, and
    # record each layer's output shape in self.reconstruction_shape
    # (presumably for a reconstruction/decoder path).
    for num_filter, kernel, stride in zip(filters, kernels, strides):
        x = Conv2D(num_filter,
                   kernel_size=kernel,
                   padding='same',
                   activation='relu',
                   strides=stride)(x)
        x = BatchNormalization()(x)
        self.reconstruction_shape += [x.get_shape().as_list()]
    return x
Example #4
def _transmit_block(x, is_last):
    # DenseNet-style transition: BatchNorm -> activation, then either global
    # average pooling (for the last block) or a 1x1x1 compression convolution
    # followed by average pooling.
    bn_scale = PARAMS['bn_scale']
    activation = PARAMS['activation']
    kernel_initializer = PARAMS['kernel_initializer']
    weight_decay = PARAMS['weight_decay']
    compression = PARAMS['compression']

    x = BatchNormalization(scale=bn_scale, axis=-1)(x)
    x = activation()(x)
    if is_last:
        x = GlobalAvgPool3D()(x)
    else:
        *_, f = x.get_shape().as_list()
        x = Conv3D(f // compression, kernel_size=(1, 1, 1), padding='same', use_bias=True,
                   kernel_initializer=kernel_initializer,
                   kernel_regularizer=l2_penalty(weight_decay))(x)
        x = AveragePooling3D((2, 2, 2), padding='valid')(x)
    return x
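# _transmit_block reads its hyper-parameters from a module-level PARAMS dict
# and uses an l2_penalty regularizer factory, neither of which is shown in
# this snippet. One plausible configuration (the names and values below are
# assumptions, not the original author's settings):
from tensorflow.keras.layers import Activation
from tensorflow.keras.regularizers import l2 as l2_penalty

PARAMS = {
    'bn_scale': True,                          # learn the BatchNorm gamma
    'activation': lambda: Activation('relu'),  # zero-arg factory returning a layer
    'kernel_initializer': 'he_normal',
    'weight_decay': 1e-4,
    'compression': 2,                          # channel reduction factor
}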