Example #1
    def build(inputShape):
        # initialize the base model
        baseModel = MobileNetV2(weights="imagenet",
                                include_top=False,
                                input_tensor=Input(shape=inputShape))

        # build additional layers on top of the baseModel
        headModel = baseModel.output
        headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(128, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(2, activation="softmax")(headModel)

        # build the final model
        model = Model(inputs=baseModel.input, outputs=headModel)

        # freeze the weights of the baseModel
        for layer in baseModel.layers:
            layer.trainable = False

        return model
Example #2
def Create_Model(input_shape=(128, 128, 3),
                 train=True,
                 num_classes=10,
                 embedding_size=128,
                 dropout_keep_prob=0.4):
    inpt = tf.keras.Input(input_shape)
    # include_top=True keeps the global-average-pooling layer tapped below
    base_model = MobileNetV2(include_top=True, input_tensor=inpt)
    out = base_model.get_layer('global_average_pooling2d').output
    x = Dropout(1.0 - dropout_keep_prob, name='Dropout')(out)
    # fully-connected layer down to the 128-d embedding
    x = Dense(embedding_size, use_bias=False, name='Bottleneck')(x)
    x = BatchNormalization(momentum=0.995,
                           epsilon=0.001,
                           scale=False,
                           name='BatchNorm_Bottleneck')(x)

    # create the embedding model
    model = tf.keras.Model(inpt, x, name='mobilenet')

    logits = Dense(num_classes)(model.output)
    softmax = Activation("softmax", name="Softmax")(logits)

    normalize = Lambda(lambda x: K.l2_normalize(x, axis=1),
                       name="Embedding")(model.output)
    combine_model = tf.keras.Model(inpt, [softmax, normalize])

    # reuse the same normalized embedding for the standalone inference model
    model = tf.keras.Model(inpt, normalize)

    if train:
        return combine_model, model
    else:
        return model


# model= Create_Model(train=False)
# model.summary()
Example #3
def hModel():
    # load the mobile network
    baseModel = MobileNetV2(weights="imagenet",
                            include_top=False,
                            input_tensor=Input(shape=(224, 224, 3)))

    # construct the head of the model that will be placed on top of the base model
    headModel = baseModel.output
    headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(128, activation="relu")(headModel)
    headModel = Dropout(0.5)(headModel)
    headModel = Dense(2, activation="softmax")(headModel)

    # place the FC model on top
    model = Model(inputs=baseModel.input, outputs=headModel)

    # freeze the base model layers so they are not updated during training
    for layer in baseModel.layers:
        layer.trainable = False

    return model
Example #4
    def __init__(self,
                 num_conv_layers,
                 num_conv_filter,
                 conv_filter_size,
                 feat_extractor="mobilenet_v2",
                 optimiser='adagrad',
                 learning_rate=0.0001,
                 train_feat_extractor=False):
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")

        for arg, val in values.items():
            setattr(self, arg, val)

        if isinstance(self.conv_filter_size, int):
            self.conv_filter_size_list = [self.conv_filter_size
                                          ] * self.num_conv_layers
        if isinstance(self.conv_filter_size, (list, tuple, np.ndarray, set)):
            if len(self.conv_filter_size) < self.num_conv_layers:
                raise ValueError(
                    "The filter size for every convolution layer has not been "
                    "specified. Please ensure conv_filter_size has the same "
                    "number of elements as num_conv_layers.")
            self.conv_filter_size_list = self.conv_filter_size

        if isinstance(self.num_conv_filter, int):
            self.conv_filter_num_list = [self.num_conv_filter
                                         ] * self.num_conv_layers
        if isinstance(self.num_conv_filter, (list, tuple, np.ndarray, set)):
            if len(self.num_conv_filter) < self.num_conv_layers:
                raise ValueError(
                    "The number of filters for every convolution layer has "
                    "not been specified. Please ensure num_conv_filter has "
                    "the same number of elements as num_conv_layers.")
            self.conv_filter_num_list = self.num_conv_filter
        if self.feat_extractor == 'mobilenet_v2':
            self.feat_extractor = MobileNetV2(include_top=False)
        else:
            self.feat_extractor = Xception(include_top=False)
        self.model = self.build_model()
Example #5
def my_model(input_shape, nclass, dropout, learning_rate=0.001):
    base_model = MobileNetV2(weights="imagenet", include_top=False)

    model_input = L.Input(input_shape)
    x = base_model(model_input)
    x = L.GlobalAveragePooling2D()(x)

    y = L.Dense(512, activation='relu')(x)
    y = L.Dropout(dropout)(y)
    y = L.Dense(512, activation='relu')(y)
    y = L.Dropout(dropout)(y)

    y_h = L.Dense(nclass, activation='softmax', name='Id')(y)

    model = Model(inputs=model_input, outputs=y_h)

    optimizer = Adam(learning_rate=learning_rate)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
Example #6
def make_model(model_name='mobilenet_v2', weights=''):
    try:
        if model_name != 'mobilenet_v2':
            raise ValueError('Unknown model_name: {}'.format(model_name))

        base_model = MobileNetV2(weights='imagenet',
                                 input_shape=(224, 224, 3),
                                 include_top=False)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(2, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
        model.load_weights(weights)
        return model
    except Exception as exc:
        print('ERROR::MODEL', exc)
        return None
Example #7
def build_model():
    # load the MobileNetV2 network, ensuring the head FC layer sets are left off
    baseModel = MobileNetV2(weights="imagenet", include_top=False,
        input_tensor=Input(shape=(224, 224, 3)))

    # construct the head of the model that will be placed on top of the base model
    headModel = baseModel.output
    headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(128, activation="relu")(headModel)
    headModel = Dropout(0.5)(headModel)
    headModel = Dense(2, activation="softmax")(headModel)

    # place the head FC model on top of the base model
    # (this will become the actual model we will train)
    model = Model(inputs=baseModel.input, outputs=headModel)

    # loop over all layers in the base model and freeze them so they will
    # *not* be updated during the first training process
    for layer in baseModel.layers:
        layer.trainable = False

    return model
Example #8
    def get_backbone(self, output_index):
        """
        Build a submodel that returns the required backbone layers.

        Args:
            output_index (list): Index of layers that will be backbone output

        Returns:
            MobileNet Model with multiple outputs
        """
        # Get MobileNet
        backbone = MobileNetV2(include_top=False, weights='imagenet')

        # Find the outputs from backbone middle layers
        outputs = []
        for backbone_index in output_index:
            outputs.append(backbone.layers[backbone_index].output)

        # Build Model
        return Model(
            inputs=backbone.inputs,
            outputs=outputs,
        )
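A standalone sketch of the same idea, hedged: the indices below are illustrative (they shift between Keras versions), so confirm them against backbone.summary() before relying on them.

import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Model

# build a multi-output feature extractor from three middle layers
backbone = MobileNetV2(include_top=False, weights='imagenet')
multi_out = Model(inputs=backbone.inputs,
                  outputs=[backbone.layers[i].output for i in (54, 116, 143)])
feats = multi_out(tf.zeros((1, 224, 224, 3)))
print([tuple(f.shape) for f in feats])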
Example #9
    def build(config, classes):

        # Build transfer learning network
        base_model = MobileNetV2(weights="imagenet",
                                 include_top=False,
                                 input_shape=config.input_shape)
        for layer in base_model.layers:
            layer.trainable = False

        # Build new top
        X = base_model.output
        # alternative: X = DepthwiseConv2D((7, 7), activation='relu')(X)
        X = MaxPooling2D((7, 7))(X)
        X = Flatten()(X)
        X = Dropout(0.5)(X)
        X = Dense(64, activation='relu')(X)
        X = Dropout(0.5)(X)
        X = Dense(classes,
                  activation='softmax' if classes > 1 else 'sigmoid')(X)
        model = Model(inputs=base_model.input, outputs=X)
        model.is_in_warmup = True

        return model
Example #10
    def __generate_and_compile_model(self):
        baseModel = MobileNetV2(weights="imagenet",
                                include_top=False,
                                input_tensor=Input(shape=SHAPE))

        headModel = baseModel.output
        headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(128, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(2, activation="softmax")(headModel)

        self.model = Model(inputs=baseModel.input, outputs=headModel)

        for layer in baseModel.layers:
            layer.trainable = False

        opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
        self.model.compile(loss="binary_crossentropy",
                           optimizer=opt,
                           metrics=["accuracy"])

        return
Example #11
def create_model(input_shape, num_classes):
    # load MobileNetV2 with its ImageNet classification top
    base_model = MobileNetV2(input_shape=input_shape)
    # freeze all the weights of the model except the last 4 layers
    for layer in base_model.layers[:-4]:
        layer.trainable = False
    # note: `model.layers.pop()` does not remove a layer from the graph in
    # tf.keras, so instead connect our own classification layer to the
    # output of the layer just before the original softmax classifier
    output = Dense(num_classes, activation="softmax")(base_model.layers[-2].output)

    model = Model(inputs=base_model.inputs, outputs=output)

    # print the summary of the model architecture
    model.summary()

    # compile the model with the Adam optimizer
    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])
    return model
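A minimal usage sketch for the function above (the input shape and class count are illustrative):

model = create_model(input_shape=(224, 224, 3), num_classes=10)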
Example #12
    def create_model(self):
        """https://github.com/atulapra/Emotion-detection/blob/master/src/emotions.py"""

        baseModel = MobileNetV2(weights="imagenet", include_top=False,
                                input_tensor=Input(shape=(160, 160, 3)))
        headModel = baseModel.output
        headModel = AveragePooling2D(pool_size=(5, 5))(headModel)
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(128, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(3, activation="softmax")(headModel)

        self.model = Model(inputs=baseModel.input, outputs=headModel)

        for layer in baseModel.layers:
            layer.trainable = False

        # compile our model
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=0.0003, decay=1e-6),
                           metrics=['accuracy'])

        self.model.summary()
        return self.model
Example #13
def mobilenet_model(img_shape=(224, 224, 3)):
    """
    Builds a MobileNetV2 model without ImageNet weights and adds the final
    layers for classification.

    Parameters
    ----------
    img_shape : tuple, optional
        Shape of the input image. The default is (224, 224, 3), which is
        the MobileNet default.

    Returns
    -------
    model : keras model
        MobileNet model with classification layers added.

    """
    base_model = MobileNetV2(input_shape=img_shape,
                             include_top=False,
                             weights=None)

    MN = base_model.output

    MN = GlobalAveragePooling2D()(MN)
    MN = Dense(512, activation='relu')(MN)
    MN = Dropout(0.5)(MN)
    MN = Dense(256, activation='relu')(MN)
    MN = Dropout(0.5)(MN)
    MN = Dense(28, activation='softmax')(MN)

    model = Model(base_model.input, MN)
    model.compile(optimizer=RMSprop(lr=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy', Precision(),
                           Recall()])
    model.summary()
    return model
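A minimal usage sketch; since weights=None and the top is left off, any input shape of at least 32x32 works here (160x160 is illustrative):

model = mobilenet_model(img_shape=(160, 160, 3))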
Example #14
    def compile_model(self, model_name, fine=0):
        if model_name == 'ResNet50':
            self.base_model = ResNet50(pooling=self.RESNET50_POOLING_AVERAGE,
                                       include_top=False,
                                       weights='imagenet')
            self.model = tf.keras.Sequential([
                self.base_model,
                keras.layers.Dense(self.NUM_CLASSES,
                                   activation=self.DENSE_LAYER_ACTIVATION)
            ])
        elif model_name == 'MobileNet v2':
            self.base_model = MobileNetV2(input_shape=self.IMAGE_SHAPE,
                                          include_top=False,
                                          weights='imagenet')
            self.model = tf.keras.Sequential([
                self.base_model,
                keras.layers.GlobalAveragePooling2D(),
                keras.layers.Dense(self.NUM_CLASSES,
                                   activation=self.DENSE_LAYER_ACTIVATION)
            ])

        # Fine Tuning
        if fine:
            self.base_model.trainable = True
            # Fine tune from this layer onwards
            self.fine_tune_at = fine

            # Freeze all the layers before the `fine_tune_at` layer
            for layer in self.base_model.layers[:self.fine_tune_at]:
                layer.trainable = False

        else:
            self.base_model.trainable = False

        self.model.compile(optimizer=self.SGD,
                           loss=self.OBJECTIVE_FUNCTION,
                           metrics=self.LOSS_METRICS)
Example #15
def build_model(img_shape, num_classes) -> Model:
    base_model = MobileNetV2(include_top=False,
                             weights="imagenet",
                             input_shape=IMAGENET_SHAPE)

    num_layers = len(base_model.layers)
    print(f"Number of layers in the base model: {num_layers}")
    fine_tune_at = num_layers - 10
    for layer in base_model.layers[:fine_tune_at]:
        layer.trainable = False

    input_img = Input(shape=img_shape)
    x = Rescaling(scale=2.0, offset=-1.0)(input_img)  # map [0, 1] inputs to [-1, 1]
    x = Resizing(height=IMAGENET_SIZE, width=IMAGENET_SIZE)(x)
    x = base_model(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(units=num_classes)(x)
    y_pred = Activation("softmax")(x)

    model = Model(inputs=[input_img], outputs=[y_pred])

    model.summary()

    return model
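The snippet above relies on module-level constants. Plausible definitions (assumptions, not from the original source) plus a call:

IMAGENET_SIZE = 224
IMAGENET_SHAPE = (IMAGENET_SIZE, IMAGENET_SIZE, 3)

model = build_model(img_shape=(64, 64, 3), num_classes=10)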
Example #16
    def _encoder(self):
        mobile_net_base_model = MobileNetV2(self.input_shape,
                                            include_top=False)

        # Use the activations of these layers
        layer_names = [
            'block_1_expand_relu',  # 120x160x96
            'block_3_expand_relu',  # 60x80x144
            'block_6_expand_relu',  # 30x40x192
            'block_13_expand_relu',  # 15x20x576
            'block_16_project',  # 8x10x320
        ]

        layers = [
            mobile_net_base_model.get_layer(name).output
            for name in layer_names
        ]

        # Create the feature extraction model
        down_stack = Model(inputs=mobile_net_base_model.input, outputs=layers)

        down_stack.trainable = False

        return down_stack
Example #17
def Encoder(input_shape=(128, 128, 3)):

    base_model = MobileNetV2(input_shape=input_shape, include_top=False)

    # Use the activations of these layers for the Skip connection
    layer_names = [
        'block_1_expand_relu',  # OUTPUT_SHAPE: (BS, 64, 64, 96)
        'block_3_expand_relu',  # OUTPUT_SHAPE: (BS, 32, 32, 144)
        'block_6_expand_relu',  # OUTPUT_SHAPE: (BS, 16, 16, 192)
        'block_13_expand_relu',  # OUTPUT_SHAPE: (BS, 8, 8, 576)
        'block_16_project'  # OUTPUT_SHAPE: (BS, 4, 4, 320)
    ]

    layers = [base_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction encoder with 5 outputs.
    # The last output is the input of the decoder; the 4th, 3rd, 2nd, and 1st
    # outputs are the 1st, 2nd, 3rd, and 4th skip connections to the decoder.
    down_stack = Model(inputs=base_model.input, outputs=layers)

    # Make it non-trainable
    down_stack.trainable = False

    return down_stack
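A minimal sketch of how a U-Net-style decoder could consume the five outputs (the upsampling blocks are illustrative, not the original decoder):

import tensorflow as tf

down_stack = Encoder()
skips = down_stack(tf.zeros((1, 128, 128, 3)))
x = skips[-1]                      # (BS, 4, 4, 320): decoder input
for skip in reversed(skips[:-1]):  # deepest skip connection first
    x = tf.keras.layers.UpSampling2D()(x)
    x = tf.keras.layers.Concatenate()([x, skip])
print(x.shape)                     # (1, 64, 64, channels)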
Example #18
    def backbone(x):
        if backbone_type == 'ResNet50':
            extractor = ResNet50(
                input_shape=x.shape[1:], include_top=False, weights=weights)
            pick_layer1 = 80  # [80, 80, 512]
            pick_layer2 = 142  # [40, 40, 1024]
            pick_layer3 = 174  # [20, 20, 2048]
            preprocess = tf.keras.applications.resnet.preprocess_input
        elif backbone_type == 'MobileNetV2':
            extractor = MobileNetV2(
                input_shape=x.shape[1:], include_top=False, weights=weights)
            pick_layer1 = 54  # [80, 80, 32]
            pick_layer2 = 116  # [40, 40, 96]
            pick_layer3 = 143  # [20, 20, 160]
            preprocess = tf.keras.applications.mobilenet_v2.preprocess_input
        else:
            raise NotImplementedError(
                'Backbone type {} is not recognized.'.format(backbone_type))

        return Model(extractor.input,
                     (extractor.layers[pick_layer1].output,
                      extractor.layers[pick_layer2].output,
                      extractor.layers[pick_layer3].output),
                     name=backbone_type + '_extractor')(preprocess(x))
Example #19
def create_mask_detector_mobilenet(input_shape):
    # use mobilenetv2 as a feature extractor
    input_layer = Input(input_shape)  # (X,Y,channel)
    mobilenetv2 = MobileNetV2(
        input_shape=input_shape,
        weights="imagenet",
        include_top=False,
        input_tensor=input_layer,
    )

    # freeze the weights in mobilenetv2
    for layer in mobilenetv2.layers:
        layer.trainable = False

    # let's turn it into a classifier
    X = mobilenetv2.output
    X = AveragePooling2D(pool_size=(7, 7))(X)
    X = Flatten()(X)
    X = Dense(512, activation="relu")(X)
    X = Dropout(0.4)(X)
    X = Dense(256, activation="relu")(X)
    X = Dense(2, activation="softmax")(X)

    return Model(inputs=input_layer, outputs=X)
Example #20
    def __init__(self,
                 input_size=(256, 256, 3),
                 output_channels=3):
        self.pretrained_model = MobileNetV2(
            input_shape=input_size,
            include_top=False,
            weights='imagenet')

        self.target_layers = [
            'block_1_expand_relu',
            'block_3_expand_relu',
            'block_6_expand_relu',
            'block_13_expand_relu',
            'block_16_project'
        ]

        self.input_size = input_size
        self.output_channels = output_channels

        self.model = self._create_model()
        loss = SparseCategoricalCrossentropy(from_logits=True)
        self.model.compile(optimizer=RMSprop(),
                           loss=loss,
                           metrics=['accuracy'])
Example #21
def create_model_wlasl2000(frames, width, height, channels, output):

    model = Sequential([
        # ConvNet
        TimeDistributed(
            MobileNetV2(
                weights="imagenet",
                include_top=False,
                input_shape=[height, width, channels],
            ),
            input_shape=[frames, height, width, channels],
        ),
        TimeDistributed(GlobalAveragePooling2D()),
        # GRUs
        GRU(256, return_sequences=True),
        BatchNormalization(),
        GRU(256),
        # Feedforward
        Dense(units=1000, activation="relu"),
        Dropout(0.75),
        Dense(units=output, activation="softmax"),
    ])

    return model
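A minimal smoke test for the function above (dimensions are illustrative; the WLASL-2000 benchmark itself has 2000 gloss classes):

model = create_model_wlasl2000(frames=16, width=224, height=224,
                               channels=3, output=2000)
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()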
Example #22
def build_MobileNet_finetuning(img_shape=(256, 256, 3), num_classes=1):
    base_model = MobileNetV2(include_top=False,
                             input_shape=img_shape,
                             weights='imagenet')
    base_model.trainable = True

    # freeze everything below this layer and fine-tune the rest
    fine_tune_at = 100
    for layer in base_model.layers[:fine_tune_at]:
        layer.trainable = False

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    outputs = Dense(num_classes,
                    kernel_initializer="he_normal",
                    activation='sigmoid')(x)
    final_model = Model(inputs=[base_model.input], outputs=[outputs])

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0)
    final_model.compile(optimizer=adam,
                        loss='binary_crossentropy',
                        metrics=['accuracy'])

    return final_model
Example #23
    if BACKBONE.lower() == "ResNet50V2".lower():
        base_model = ResNet50V2(input_shape=(img_height, img_width, 3),
                                include_top=False,
                                weights=None)
    elif BACKBONE.lower() == "InceptionResNetV2".lower():
        base_model = InceptionResNetV2(input_shape=(img_height, img_width, 3),
                                       include_top=False,
                                       weights=None)
    elif BACKBONE.lower() == "EfficientNet".lower():
        base_model = efn.EfficientNetB5(
            input_shape=(img_height, img_width, 3),
            weights=None,
            include_top=False,
        )
    elif BACKBONE.lower() == "MobileNetV2".lower():
        base_model = MobileNetV2(input_shape=(img_height, img_width, 3),
                                 include_top=False,
                                 weights=None)
    else:
        raise NotImplementedError("Unknown backbone '{}'".format(BACKBONE))
    base_model.trainable = True

    # define top layers
    x = base_model.output
    x = Flatten()(x)
    x = Dropout(DROPOUT)(x)  # this dropout here also seems to help
    x = Dense(2048)(x)
    x = Dropout(DROPOUT)(x)
    x = Dense(2048)(x)
    x = Dropout(DROPOUT)(x)
    predictions = Dense(1, activation="sigmoid")(x)
    model = Model(inputs=base_model.input, outputs=predictions)
Example #24
    def __init__(self):
        super().__init__()
        self.mobilenet = MobileNetV2(include_top=False, pooling=None,
                                     weights='imagenet',
                                     input_shape=(96, 96, 3))
        self.local_pool = AveragePooling2D((3, 3))
        self.global_avg_pool = GlobalAveragePooling2D()
        self.freeze_all_layers()
Example #25
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os


model = Sequential()
model.add(MobileNetV2(include_top=False,
          weights="imagenet", input_shape=(224, 224, 3)))
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.layers[0].trainable = False

model.summary()

train_datagen = ImageDataGenerator(
    preprocessing_function=tf.keras.applications.mobilenet.preprocess_input)
train_generator = train_datagen.flow_from_directory('./DATASET/TRAIN',
                                                    target_size=(224, 224),
                                                    color_mode='rgb',
                                                    batch_size=120,
                                                    class_mode='binary',
                                                    shuffle=True)
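A hedged continuation: compile the model and train it on the generator above (optimizer and epoch count are illustrative):

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(train_generator, epochs=5)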
Example #26
def get_model(input_type=INPUT_DEPTH,
              input_shape=(96, 96),
              output_type=OUTPUT_BITERNION,
              weight_decay=0.00005,
              activation=relu6,
              n_classes=None,
              **kwargs):

    # check arguments
    assert input_type in [INPUT_DEPTH, INPUT_RGB]
    assert input_shape in INPUT_SHAPES
    assert output_type in OUTPUT_TYPES
    assert n_classes is not None or output_type != OUTPUT_CLASSIFICATION
    assert K.image_data_format() == 'channels_last'

    if weight_decay is not None:
        warnings.warn("given weight_decay is applied to output stage only")

    if activation is not None:
        warnings.warn("given activation is applied to output stage only")

    for kw in kwargs:
        if kw in ['sampling']:
            warnings.warn("argument '{}' not supported for MobileNet v2"
                          "".format(kw))

    if 'alpha' not in kwargs:
        warnings.warn("no value for alpha given, using default: 1.0")
    alpha = kwargs.get('alpha', 1.0)

    # regularizer
    reg = l2(weight_decay) if weight_decay is not None else None

    # define input ------------------------------------------------------------
    if input_type == INPUT_DEPTH:
        input_ = depth_input(input_shape)
    elif input_type == INPUT_RGB:
        input_ = rgb_input(input_shape)
    else:
        raise ValueError("input type: {} not supported".format(input_type))

    # build model -------------------------------------------------------------
    # load base model with pretrained weights
    mobile_net = MobileNetV2(
        input_shape=input_shape + (3, ),
        alpha=alpha,
        # depth_multiplier=1,    # does not exist any more
        include_top=False,
        weights='imagenet',
        input_tensor=None,
        pooling='avg',
        classes=None)

    # if the input is a depth image, we have to convert the kernels of the
    # first conv layer
    if input_type == INPUT_DEPTH:
        # easiest way: modify config, recreate model and copy modified weights
        # get config
        cfg = mobile_net.get_config()
        # modify input shape
        batch_input_shape = (None, ) + input_shape + (1, )
        cfg['layers'][0]['config']['batch_input_shape'] = batch_input_shape
        # instantiate a new model
        mobile_net_mod = Model.from_config(cfg)
        # copy (modified) weights
        assert len(mobile_net.layers) == len(mobile_net_mod.layers)
        for l_mod, l in zip(mobile_net_mod.layers, mobile_net.layers):
            # get weights
            weights = l.get_weights()

            # modify kernels for Conv1 (sum over input channels)
            if l.name == 'Conv1':
                assert len(weights) == 1, "Layer without bias expected"
                kernels = weights[0]
                kernels_mod = kernels.sum(axis=2, keepdims=True)
                weights_mod = (kernels_mod, )
            else:
                weights_mod = weights

            # set (modified) weights
            l_mod.set_weights(weights_mod)

        mobile_net = mobile_net_mod

    # build final model
    x = mobile_net(input_)
    x = Flatten(name='output_1_flatten')(x)
    x = Dropout(rate=0.2, name='output_2_dropout')(x)
    x = Dense(units=512, kernel_regularizer=reg, name='output_2_dense')(x)
    x = Activation(activation, name='output_2_act')(x)
    x = Dropout(rate=0.5, name='output_3_dropout')(x)

    if output_type == OUTPUT_BITERNION:
        kernel_initializer = RandomNormal(mean=0.0, stddev=0.01)
        x = biternion_output(kernel_initializer=kernel_initializer,
                             kernel_regularizer=reg,
                             name='output_3_dense_and_act')(x)
    elif output_type == OUTPUT_REGRESSION:
        x = regression_output(kernel_regularizer=reg,
                              name='output_3_dense_and_act')(x)
    elif output_type == OUTPUT_CLASSIFICATION:
        x = classification_output(n_classes,
                                  name='output_3_dense_and_act',
                                  kernel_regularizer=reg)(x)

    return Model(inputs=input_, outputs=[x])
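Why summing the Conv1 kernels over the input-channel axis is the right conversion: for a single-channel image replicated across three channels, the original 3-channel convolution gives exactly the same response as a 1-channel convolution with the channel-summed kernel. A small self-contained numpy check (illustrative, not part of the original source):

import numpy as np

rng = np.random.default_rng(0)
kernels = rng.normal(size=(3, 3, 3, 8))  # HWIO: 3x3 kernel, 3 in, 8 out channels
gray = rng.normal(size=(3, 3))           # one single-channel 3x3 patch

# response of the 3-channel kernel on the replicated-channel patch
rgb = np.stack([gray] * 3, axis=-1)
r_rgb = np.einsum('hwc,hwco->o', rgb, kernels)

# response of the channel-summed kernel on the single-channel patch
r_depth = np.einsum('hw,hwo->o', gray, kernels.sum(axis=2))

assert np.allclose(r_rgb, r_depth)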
Example #27
def cifar_model(x_test, model_name='MobileNetV2', num_classes=10):
    # Load the model from tensorflow
    include_top = False
    weights = 'imagenet'
    input_tensor = Input(x_test.shape[1:], dtype='float16')
    input_shape = x_test.shape[1:]
    pooling = None
    classes = num_classes

    if model_name == 'resnetv1SE':
        KerasModel = resnet_v1(input_shape,
                               depth=50,
                               SE_impl=True,
                               include_top=include_top,
                               num_classes=classes)

    if model_name == 'resnetv1':
        KerasModel = resnet_v1(input_shape,
                               depth=50,
                               SE_impl=False,
                               include_top=include_top,
                               num_classes=classes)

    if model_name == 'MobileNet':
        KerasModel = MobileNet(include_top=include_top,
                               weights=weights,
                               input_tensor=input_tensor,
                               input_shape=input_shape,
                               pooling=pooling,
                               classes=classes)

    elif model_name == 'MobileNetV2':
        KerasModel = MobileNetV2(include_top=include_top,
                                 weights=weights,
                                 input_tensor=input_tensor,
                                 input_shape=input_shape,
                                 pooling=pooling,
                                 classes=classes)

    elif model_name == 'ResNet50V2':
        KerasModel = ResNet50V2(
            include_top=include_top,
            weights=weights,
            input_tensor=input_tensor,
            input_shape=input_shape,
            pooling=pooling,
            # classifier_activation='softmax',
            classes=classes)

    elif model_name == 'ResNet50':
        KerasModel = ResNet50(
            include_top=include_top,
            weights=weights,
            input_tensor=input_tensor,
            input_shape=input_shape,
            pooling=pooling,
            # classifier_activation='softmax',
            classes=classes)

    elif model_name == 'DenseNet121':
        KerasModel = DenseNet121(include_top=include_top,
                                 weights=weights,
                                 input_tensor=input_tensor,
                                 input_shape=input_shape,
                                 pooling=pooling,
                                 classes=classes)
    inputs = KerasModel.input
    output = KerasModel.output
    x = GlobalAveragePooling2D()(output)
    x = Flatten()(x)
    x = Dense(num_classes, kernel_initializer='he_normal')(x)
    outputs = Softmax(dtype='float32')(x)

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
Example #28
                   (tf.TensorShape([]), tf.TensorShape([]), tf.TensorShape([]))),
)


train_size = int(0.8 * DATASET_SIZE)
test_size = int(0.2 * DATASET_SIZE)

dataset = dataset.shuffle(buffer_size=2048)
train_ds = dataset.take(train_size)
test_ds = dataset.skip(train_size)


""" Define Model """
base_model = MobileNetV2(
    weights="imagenet",
    include_top=False,
    input_tensor=tf.keras.layers.Input(shape=(224, 224, 3))
)


head_model = base_model.output
head_model = tf.keras.layers.GlobalMaxPooling2D()(head_model)

y = tf.keras.layers.Dense(512)(head_model)
y = tf.keras.layers.BatchNormalization()(y)
y = tf.keras.layers.Activation('relu')(y)

x = tf.keras.layers.concatenate([head_model, y])
x = tf.keras.layers.Dense(512)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
Example #29
                                                  stratify=labels,
                                                  random_state=42)

# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20,
                         zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the MobileNetV2 network, ensuring the head FC layer sets are
# left off
baseModel = MobileNetV2(weights="imagenet",
                        include_top=False,
                        input_tensor=Input(shape=(224, 224, 3)))

# construct the head of the model that will be placed on top of the
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
Example #30
def unfreeze_model(model):
    # Unfreeze the entire model; alternatively, unfreeze only the layers
    # after, say, the 100th layer
    for layer in model.layers:
        if not isinstance(layer, layers.BatchNormalization):
            layer.trainable = True

    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
    model.compile(optimizer=optimizer,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])


inputs = layers.Input(shape=(input_size, input_size, 3))
model = MobileNetV2(weights='imagenet',
                    include_top=False,
                    input_tensor=inputs,
                    input_shape=(input_size, input_size, 3))

# Freeze the base model
model.trainable = False

x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
x = layers.BatchNormalization()(x)
dropout_rate = 0.2
x = layers.Dropout(dropout_rate, name="top_dropout")(x)
outputs = layers.Dense(24, activation="softmax", name="pred")(x)
model = tf.keras.Model(inputs, outputs, name="MobileNet")
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
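A hedged end-to-end sketch of the intended two-stage schedule on random data (the batch of 8 is purely illustrative; input_size is the global assumed above):

import numpy as np

x_dummy = np.random.rand(8, input_size, input_size, 3).astype("float32")
y_dummy = tf.keras.utils.to_categorical(
    np.random.randint(0, 24, size=8), num_classes=24)
model.fit(x_dummy, y_dummy, epochs=1)  # stage 1: train only the new head
unfreeze_model(model)                  # stage 2: unfreeze all but BatchNorm
model.fit(x_dummy, y_dummy, epochs=1)  # fine-tune at the lower learning rate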