Example 1
import tensorflow as tf

# Helpers such as mobilenet_v2, make_last_layers_mobilenet, compose,
# MobilenetConv2D and AdvLossModel are defined elsewhere in this project.
def mobilenetv2_yolo_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Build a YOLO-style detection body on top of a MobileNetV2 backbone."""
    mobilenetv2 = mobilenet_v2(default_batchnorm_momentum=0.9,
                               alpha=alpha,
                               input_tensor=inputs,
                               include_top=False,
                               weights='imagenet')
    # First detection head, attached to the backbone's final feature map.
    x, y1 = make_last_layers_mobilenet(mobilenetv2.output, 17, 512,
                                       num_anchors * (num_classes + 5))
    # Reduce channels with a 1x1 conv, then upsample to match the next scale.
    x = compose(
        tf.keras.layers.Conv2D(256,
                               kernel_size=1,
                               padding='same',
                               use_bias=False,
                               name='block_20_conv'),
        tf.keras.layers.BatchNormalization(momentum=0.9, name='block_20_BN'),
        tf.keras.layers.ReLU(6., name='block_20_relu6'),
        tf.keras.layers.UpSampling2D(2))(x)
    # Merge the upsampled features with the backbone's block_12 feature map.
    x = tf.keras.layers.Concatenate()([
        x,
        MobilenetConv2D(
            (1, 1), alpha,
            384)(mobilenetv2.get_layer('block_12_project_BN').output)
    ])
    # Second detection head at the intermediate scale.
    x, y2 = make_last_layers_mobilenet(x, 21, 256,
                                       num_anchors * (num_classes + 5))
    # Reduce channels and upsample again for the finest scale.
    x = compose(
        tf.keras.layers.Conv2D(128,
                               kernel_size=1,
                               padding='same',
                               use_bias=False,
                               name='block_24_conv'),
        tf.keras.layers.BatchNormalization(momentum=0.9, name='block_24_BN'),
        tf.keras.layers.ReLU(6., name='block_24_relu6'),
        tf.keras.layers.UpSampling2D(2))(x)
    # Merge the upsampled features with the backbone's block_5 feature map.
    x = tf.keras.layers.Concatenate()([
        x,
        MobilenetConv2D(
            (1, 1), alpha,
            128)(mobilenetv2.get_layer('block_5_project_BN').output)
    ])
    # Third detection head at the finest scale.
    x, y3 = make_last_layers_mobilenet(x, 25, 128,
                                       num_anchors * (num_classes + 5))
    # Reshape each head to (batch, grid_h, grid_w, num_anchors, num_classes + 5).
    y1 = tf.keras.layers.Lambda(lambda y: tf.reshape(y, [
        -1, tf.shape(y)[1],
        tf.shape(y)[2], num_anchors, num_classes + 5
    ]),
                                name='y1')(y1)
    y2 = tf.keras.layers.Lambda(lambda y: tf.reshape(y, [
        -1, tf.shape(y)[1],
        tf.shape(y)[2], num_anchors, num_classes + 5
    ]),
                                name='y2')(y2)
    y3 = tf.keras.layers.Lambda(lambda y: tf.reshape(y, [
        -1, tf.shape(y)[1],
        tf.shape(y)[2], num_anchors, num_classes + 5
    ]),
                                name='y3')(y3)
    return AdvLossModel(mobilenetv2.inputs, [y1, y2, y3])
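
# A minimal usage sketch, assuming AdvLossModel subclasses tf.keras.Model; the
# 416x416 input resolution, 3 anchors per scale and 80 classes are placeholder
# values, not taken from the original project configuration.
if __name__ == '__main__':
    image_input = tf.keras.layers.Input(shape=(416, 416, 3))
    model = mobilenetv2_yolo_body(image_input,
                                  num_anchors=3,
                                  num_classes=80,
                                  alpha=1.0)
    # Assuming AdvLossModel behaves like a regular tf.keras.Model.
    model.summary()
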
def mobilenetv2(inputs, alpha, classes):
    """MobileNetV2 wrapper function.

    Arguments:
        inputs {tf.Tensor} -- [input image tensor]
        alpha {float} -- [controls the width of the network, known as the
            width multiplier in the MobileNetV2 paper.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, the default number of filters from the paper
                is used at each layer.]
        classes {int} -- [total number of classes]

    Returns:
        [tf.keras.Model] -- [MobileNetV2 model]
    """
    return mobilenet_v2(default_batchnorm_momentum=0.9,
                        alpha=alpha,
                        input_tensor=inputs,
                        classes=classes)
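
# A minimal usage sketch for the classifier wrapper; the 224x224 input size and
# the class count are assumed placeholder values, not project settings.
def _example_mobilenetv2():
    image_input = tf.keras.layers.Input(shape=(224, 224, 3))
    classifier = mobilenetv2(image_input, alpha=1.0, classes=1000)
    return classifier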