def load_mobilenet():
    """Loads the MobileNet model"""
    print("Loading the MobileNet model...")
    mobilenet = MobileNet(alpha=0.25)
    print("Model Loaded.")
    layer = mobilenet.get_layer('conv_pw_13_relu')
    return keras.Model(inputs=mobilenet.inputs, outputs=layer.output)
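A minimal usage sketch (assumes keras and numpy are available; dummy_batch is illustrative): the truncated model acts as a feature extractor whose output at alpha=0.25 is (7, 7, 256), since 1024 * 0.25 = 256.

import numpy as np
import keras
from keras.applications import MobileNet

feature_extractor = load_mobilenet()
dummy_batch = np.random.rand(1, 224, 224, 3).astype('float32')
features = feature_extractor.predict(dummy_batch)  # shape (1, 7, 7, 256)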
Example no. 2
    def MobileNet(self):
        from keras.applications import MobileNet
        model = models.Sequential()
        alpha = 0.75
        conv_base = MobileNet(include_top=False,
                              weights='imagenet',
                              input_shape=self.input_shape,
                              pooling='max',
                              alpha=alpha)
        print('MobileNet:\n')
        conv_base.summary()
        '''
        alpha: controls the network width:
                  if alpha < 1, the number of filters in each layer is reduced proportionally
                  if alpha > 1, the number of filters in each layer is increased proportionally
                  if alpha = 1, the default number of filters is used
        '''
        model.add(conv_base)
        model.add(layers.Reshape((1, 1, int(1024 * alpha))))
        model.add(layers.Dropout(0.5))  # was 1e-3 originally, but that rate seems too small to help generalization
        model.add(
            layers.Conv2D(len(self.labels), (1, 1),
                          padding='same',
                          name='conv_preds'))
        model.add(layers.Activation('softmax', name='act_softmax'))
        model.add(layers.Reshape((len(self.labels), ), name='reshape_2'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.Adam(lr=2e-3),
                      metrics=['acc'])
        return model
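The Reshape target follows directly from the width multiplier: the last MobileNet block carries int(1024 * alpha) channels, i.e. 768 at alpha=0.75. A quick check, assuming keras is importable:

from keras.applications import MobileNet

base = MobileNet(include_top=False, weights=None, alpha=0.75,
                 pooling='max', input_shape=(224, 224, 3))
print(base.output_shape)  # (None, 768) == (None, int(1024 * 0.75))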
Example no. 3
def build_mobilenet(config, num_classes):
    log.info("Building Mobile Net")
    # CONSTANTS
    if config.load_cifar:
        IMAGE_SHAPE = (32, 32, 3)
    else:
        IMAGE_SHAPE = (224, 224, 3)

    feature_extractor_layer = MobileNet(
        include_top=False,
        weights="imagenet",
        input_shape=IMAGE_SHAPE,
        classes=num_classes,  # note: ignored by Keras when include_top=False
    )
    feature_extractor_layer.trainable = False

    # Add a classification layer, which is a dense layer connected to num_classes nodes
    classification_layer = tf.keras.layers.Dense(num_classes)

    # Build the classifier
    classifier = tf.keras.Sequential()

    classifier.add(feature_extractor_layer)
    classifier.add(tf.keras.layers.GlobalAveragePooling2D())

    if config.with_dropout:
        classifier.add(Dropout(config.dropout_rate))
    classifier.add(classification_layer)

    classifier.summary()

    return classifier
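A hedged usage sketch for build_mobilenet; the SimpleNamespace config and the class count are illustrative stand-ins for the project's real config object:

from types import SimpleNamespace
import tensorflow as tf

config = SimpleNamespace(load_cifar=False, with_dropout=True, dropout_rate=0.3)
model = build_mobilenet(config, num_classes=10)
# Dense(num_classes) has no activation, so train against logits:
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])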
def init():
    network = MobileNet(alpha=1.0)
    params = network.get_weights()

    graph = tf.Graph()
    with graph.as_default():
        images = np.random.rand(1, 224, 224, 3)

        inference(images, False)

        model_checkpoint_path = 'log/model_dump/model.ckpt'
        var_list = tf.get_collection('params')
        assert len(var_list) == len(params)
        saver = tf.train.Saver(var_list)

        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(len(var_list)):
                # Keras stores depthwise kernels as (kh, kw, in_channels, depth_multiplier);
                # the target graph expects the last two axes swapped.
                if 'depthwise' in var_list[i].name and len(params[i].shape) == 4:
                    params[i] = np.transpose(params[i], (0, 1, 3, 2))
                # 2-D Dense kernels become 1x1 conv kernels: (in, out) -> (1, 1, in, out).
                if len(params[i].shape) == 2:
                    params[i] = np.expand_dims(params[i], 0)
                    params[i] = np.expand_dims(params[i], 0)
                print(var_list[i].name, var_list[i].shape, params[i].shape)
                sess.run(tf.assign(var_list[i], params[i]))

            saver.save(sess,
                       model_checkpoint_path,
                       write_meta_graph=False,
                       write_state=False)
def mobilenet_prediction(model_path, query_path):

    model = MobileNet(weights='imagenet', include_top=False)

    # Extract features of the query image
    imgq = image.load_img(query_path, target_size=(224, 224))
    img_dataq = image.img_to_array(imgq)
    img_dataq = np.expand_dims(img_dataq, axis=0)
    img_dataq = preprocess_input(img_dataq)
    mnet_feature_query = model.predict(img_dataq)
    mnet_feature_np_query = np.array(mnet_feature_query)
    mnet_feature_np_query = mnet_feature_np_query.flatten()

    listOfInput = [mnet_feature_np_query]

    loaded_model = load(model_path)

    probs = loaded_model.predict_proba(listOfInput)

    print("probs", probs)
    #loaded_model.predict(listOfInput)

    #probs = model.predict_proba(listOfInput)

    category = []
    classes = loaded_model.classes_

    for prob, label in zip(probs[0], classes):
        category.append((prob, label))

    final_category = sorted(category, key=lambda x: x[0], reverse=True)

    return final_category[:3]
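An illustrative call; the paths are placeholders, and model_path is assumed to point at a joblib/pickle-saved scikit-learn classifier, matching the load()/predict_proba() usage above:

top3 = mobilenet_prediction('classifier.joblib', 'query.jpg')
for prob, label in top3:
    print(label, prob)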
Example no. 6
    def __init__(self):
        self.name = 'CNN_App_Keras'
        self.input_shape = (224, 224)
        #self.model = Xception()
        self.model = MobileNet()
        self.class_names = tools_CNN_view.class_names

        return
Example no. 7
def mobile(X_train, Y_train):
    conv_base = MobileNet(weights='imagenet',
                          include_top=False,
                          input_shape=(128, 128, 3))
    # params (the generator settings dict) is assumed to be defined at module level
    samples_generator = DataGenerator(X_train, Y_train, **params)
    features = conv_base.predict_generator(samples_generator, verbose=True)

    return features
Example no. 8
def make_model():
    model = MobileNet(input_shape=(224, 224, 3),
                      alpha=1.,
                      weights=None,
                      classes=2)
    model.compile(optimizer=Adam(lr=0.002),
                  loss='categorical_crossentropy',
                  metrics=[categorical_crossentropy, categorical_accuracy])
    model.summary()  # summary() prints the architecture itself and returns None
    return model
def get_model():
    # build the MobileNet base without its 1000-class classification top;
    # pass weights='imagenet' instead of None to start from pretrained weights
    base_model = MobileNet(weights=None, input_shape=(224, 224, 3), include_top=False)
    base_model.trainable = False
    inputs = keras.Input(shape=(224, 224, 3))
    x = base_model(inputs, training=False)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
    outputs = keras.layers.Dense(6)(x)
    model = keras.Model(inputs, outputs)
    return model
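A typical follow-up for this transfer-learning setup (a sketch; the Dense(6) head emits logits, and the datasets are assumed):

model = get_model()
model.compile(optimizer=keras.optimizers.Adam(1e-3),
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# model.fit(train_ds, validation_data=val_ds, epochs=5)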
Example no. 10
def trainFromScratch_MobileNet():
    from time import time
    from keras.callbacks import TensorBoard
    tensorboard = TensorBoard(
        log_dir="logs/resnet_CosmeticsBrands/{}".format(time()),
        histogram_freq=0,
        write_graph=True,
        write_images=True)

    checkPoint = ModelCheckpoint(filepath='weights/ah_mobile_2cats.h5',
                                 save_best_only=True)

    model = MobileNet(weights="imagenet",
                      include_top=False,
                      input_shape=(image_size, image_size, 3))

    generator_training = ImageDataGenerator(
        rescale=1. / 255,
        #rotation_range=0.4,
        #shear_range=0.2,
        #zoom_range=0.1,
        #width_shift_range=0.1,
        #height_shift_range=0.1,
        vertical_flip=True,
        horizontal_flip=True)

    augmented_training = generator_training.flow_from_directory(
        train_data,
        target_size=(image_size, image_size),
        class_mode="categorical",
        shuffle=False,
        batch_size=batch_size)

    generator_test = ImageDataGenerator(rescale=1. / 255)
    augmented_testing = generator_test.flow_from_directory(
        test_data,
        target_size=(image_size, image_size),
        class_mode="categorical",
        batch_size=batch_size,
        shuffle=False)

    #    optimizer_1 = RMSprop(lr=0.0005)
    #    optimizer_2 = Adagrad()
    #    model.compile(optimizer_1, loss='categorical_crossentropy', metrics=["accuracy"])

    training_bottleneck_features = model.predict_generator(
        augmented_training, steps=training_instances // batch_size)
    np.save('weights/ah_trainig_bottleneck.npy',
            training_bottleneck_features)

    testing_bottleneck_features = model.predict_generator(
        augmented_testing, steps=test_instances // batch_size)
    np.save('weights/ah_testing_bottleneck.npy',
            testing_bottleneck_features)
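The saved bottleneck features are typically reloaded later to train a small classifier head; a sketch using the file names above:

train_features = np.load('weights/ah_trainig_bottleneck.npy')
test_features = np.load('weights/ah_testing_bottleneck.npy')
print(train_features.shape, test_features.shape)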
Example no. 11
def example_predict():

    CNN = MobileNet()
    #CNN = Xception()

    img = cv2.imread('./data/ex_natural_images/dog/dog_0011.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (224, 224)).astype(numpy.float32)

    prob = CNN.predict(preprocess_input(numpy.array([img])))
    idx = numpy.argsort(-prob[0])[0]
    print(class_names[idx], prob[0, idx])

    return
Example no. 12
def create_base_model(input_shape):
    # initialize pre-trained base model for fine tuning
    mobile_net_base = MobileNet(include_top=False,
                                weights='imagenet',
                                input_shape=(224, 224, 3))
    # freeze everything except last 3 layers for training
    for layer in mobile_net_base.layers[:-3]:
        layer.trainable = False

    inputs = Input(input_shape)

    # add pre-trained model
    x = mobile_net_base(inputs)
    # add new convolution layers and fully-connected layers
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 1024))(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(128, activation='relu')(x)

    base_model = Model(inputs=inputs, outputs=predictions)
    base_model.summary()
    return base_model
def create_base_model(input_shape, num_classes):

    # initialize pre-trained base model for fine tuning
    mobile_net_base = MobileNet(include_top=False,
                                weights='imagenet',
                                input_shape=(224, 224, 3))

    # freeze everything except the last 6 layers for training
    for layer in mobile_net_base.layers[:-6]:
        layer.trainable = False

    model = Sequential()

    # add pre-trained model
    model.add(mobile_net_base)
    model.add(GlobalAveragePooling2D())
    model.add(Reshape((1, 1, 1024)))
    model.add(Flatten())

    # fully connected layers
    model.add(Dense(256, use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(128, use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    # final fully connected prediction layer
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.summary()
    return model
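Either create_base_model variant still needs compiling before training; a sketch with illustrative arguments:

model = create_base_model((224, 224, 3), num_classes=5)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])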
Example no. 14
def create_mobilenet_model(use_weights=False):
    base_model = MobileNet(input_shape=(128, 128, 2),
                           include_top=False,
                           weights=None)
    # The output shape just before the pooling and dense layers is: (4, 4, 1024)
    x = base_model.output

    # four Conv2D heads in parallel, each with two 4x4 filters
    x = [Conv2D(2, 4, name='conv2d_{}'.format(i))(x) for i in range(1, 5)]
    x = Concatenate(name='concatenate_1')(x)
    x = Flatten(name='flatten_1')(x)

    model = Model(base_model.input, x, name='mobile_homographynet')

    if use_weights:
        weights_name = os.path.basename(MOBILENET_WEIGHTS_PATH)
        weights_path = get_file(
            weights_name,
            MOBILENET_WEIGHTS_PATH,
            cache_subdir='models',
            file_hash=
            'e161aabc5a04ff715a6f5706855a339d598d1216a4a5f45b90b8dbf5f8bcedc3')
        model.load_weights(weights_path)

    return model
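A hedged usage sketch: HomographyNet-style models take a 2-channel stack of grayscale patch pairs and regress 8 values (the four parallel conv heads contribute 2 each):

import numpy as np

model = create_mobilenet_model(use_weights=False)
pair = np.random.rand(1, 128, 128, 2).astype('float32')
offsets = model.predict(pair)  # shape (1, 8)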
Example no. 15
 def __init__(self):
     self.input_size = 128
     weights_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'weight', 'mobilenetv1',
                                 'mobilenet_5_0_128_tf_no_top.h5')
     base = MobileNet(input_shape=(128, 128, 3),
                      alpha=0.5,
                      include_top=False,
                      weights=weights_path)
     top_layer = GlobalAveragePooling2D()(base.output)
     gender_layer = Dense(2, activation='softmax', name='gender_prediction')(top_layer)
     age_layer = Dense(101, activation='softmax', name='age_prediction')(top_layer)
     super().__init__(inputs=base.input, outputs=[gender_layer, age_layer], name='AgenderNetMobileNetV1')
Example no. 16
    def create_model(self):
        inputs = Input(shape=self.dim, name='input')
        model_mobilenet = MobileNet(input_shape=self.dim,
                                    alpha=1,
                                    depth_multiplier=1,
                                    dropout=self.dropout_global,
                                    include_top=False,
                                    weights=self.w,
                                    input_tensor=None)

        x = model_mobilenet(inputs)
        x = SeparableConv2D(filters=128,
                            kernel_size=(7, 7),
                            activation='relu',
                            padding='same')(x)
        x = Flatten()(x)
        x = BatchNormalization()(x)
        x = Dense(1024,
                  activation='relu',
                  kernel_regularizer=l2(self.regulizer))(x)
        x = Dropout(self.dropout)(x)
        z = Dense(self.classes, activation='tanh')(x)
        model = Model(inputs=inputs, outputs=z)

        adam = Adam(lr=self.eta)
        model.compile(optimizer=adam, loss=l1_loss, metrics=['mse', 'mae'])

        model.summary()  # summary() prints the architecture itself and returns None

        return model
Example no. 17
def build_model(dim_img=(224, 224), n_channels=3):
    input_shape = dim_img + (n_channels, )

    # Import model discarding the last layers (output layers).
    base_model = MobileNet(weights=None,
                           include_top=False,
                           input_shape=input_shape)

    # Add our own layers here, at the end of the base_model
    x = base_model.output

    x = GlobalAveragePooling2D()(x)
    x = Dense(100, activation='relu')(x)  # optionally add activity_regularizer=regularizers.l2(0.001)
    x = Dropout(0.35)(x)
    x = Dense(85, activation='relu')(x)  # optionally add activity_regularizer=regularizers.l2(0.001)
    # x = Dense(500, activation='relu')(x)
    # x = Dense(100, activation='relu')(x)  # TODO
    preds = Dense(6, activation='softmax')(x)

    # Create model based on our architecture
    model = Model(inputs=base_model.input, outputs=preds)

    # Save an image of the model architecture
    # plot_model(model, show_shapes=True, to_file='data/img_models/CWT_CNN_MobileNet.png')

    return model
Example no. 18
    def __model__(self):
        """Build & compile model keras.

        :return: (Keras.Sequential) model deep
        """
        # TODO: refactor this
        mobilenet = MobileNet(weights='imagenet',
                              include_top=False,
                              input_shape=(224, 224, 3))
        init = mobilenet.output
        pool1 = GlobalAveragePooling2D()(init)
        l1 = Dense(1024)(pool1)
        act1 = Activation(activation="relu")(l1)
        drop1 = Dropout(0.2)(act1)
        l2 = Dense(self.number_classes)(drop1)
        output = Activation(activation="softmax")(l2)
        model = Model(inputs=mobilenet.input, outputs=output)
        for layer in model.layers[:-6]:
            layer.trainable = False
        metrics = [
            'accuracy',
            keras_metrics.precision(),
            keras_metrics.recall()
        ]
        model.compile(optimizer='Adam', loss=self.loss, metrics=metrics)

        return model
Example no. 19
 def get_model():
     from keras.applications import MobileNet
     mobilenet = MobileNet(include_top=True, weights='imagenet',
                           input_shape=(224, 224, 3), classes=1000)
     inputs = {mobilenet.input_names[0]: ((1, 224, 224, 3), "float32")}
     mod, params = _get_keras_model(mobilenet, inputs)
     return mod, params, inputs
Example no. 20
def create_model():
    # import the MobileNet base and discard its top classification layers
    base_model = MobileNet(weights='imagenet', include_top=False)
    x = base_model.output
    # attach new layers to train on
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)  # dense layers let the model learn more complex functions for better classification
    x = Dense(1024, activation='relu')(x)  # dense layer 2
    x = Dense(512, activation='relu')(x)  # dense layer 3
    preds = Dense(2, activation='softmax')(x)  # final layer with softmax activation
    # specify inputs and outputs
    model = Model(inputs=base_model.input, outputs=preds)
    # freeze the first 87 layers (indices 0-86) of MobileNet
    for layer in model.layers[:87]:
        layer.trainable = False
    # print to check layer architecture and trainable layers
    for i, layer in enumerate(model.layers):
        print(i, layer.name, layer.trainable)
    # select optimizer, loss function, and metric
    model.compile(optimizer='Adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example no. 21
    def load_model(self, name_model=''):
        """Loads the model indicated by name_model.

        Args:
            name_model (str, optional): name of the model we want to load.
            Defaults to ''.
        """
        if name_model != '':
            if name_model in self.name_model:
                if name_model == 'VGG16':
                    self.MODEL = VGG16(weights='imagenet')
                    self.fonction_preprocessing = prepro_vg116
                elif name_model == "mobile_net":
                    self.MODEL = MobileNet()
                    self.fonction_preprocessing = prepro_mobile_net
                elif name_model == "efficient_net":
                    self.MODEL = EfficientNetB0()
                    self.fonction_preprocessing = prepro_efficient_net
                elif name_model == "efficient_netB3":
                    self.MODEL = EfficientNetB3()
                    self.fonction_preprocessing = prepro_efficient_net
            else:
                raise Exception(f"name_model not found in {self.name_model}")
        else:
            self.MODEL = VGG16(weights='imagenet')
            self.fonction_preprocessing = prepro_vg116  # use the VGG16 preprocessing to match the default model
    def __call__(self):
        logging.debug("Creating model...")

        inputs = Input(shape=self._input_shape)
        model_mobilenet = MobileNet(input_shape=self._input_shape,
                                    alpha=self.alpha,
                                    depth_multiplier=1,
                                    dropout=1e-3,
                                    include_top=False,
                                    weights=self.weights,
                                    input_tensor=None,
                                    pooling=None)

        x = model_mobilenet(inputs)

        feat_a = GlobalAveragePooling2D()(x)
        feat_a = Dropout(0.5)(feat_a)
        feat_a = Dense(self.FC_LAYER_SIZE, activation="relu")(feat_a)

        pred_g_softmax = Dense(2, activation='softmax', name='gender')(feat_a)
        pred_a_softmax = Dense(self.num_neu, activation='softmax',
                               name='age')(feat_a)

        model = Model(inputs=inputs, outputs=[pred_g_softmax, pred_a_softmax])

        return model
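The two softmax heads are usually trained jointly with per-output losses; a sketch, where factory stands in for an instance of the enclosing class:

model = factory()
model.compile(optimizer='adam',
              loss={'gender': 'categorical_crossentropy',
                    'age': 'categorical_crossentropy'},
              metrics=['accuracy'])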
Example no. 23
def mobileNet():
    global x_train, x_test
    print('MobileNet')
    # note: np.resize tiles the existing values to the new shape; it does not rescale images
    x_train = np.resize(x_train, (len(x_train), 75, 75, 3))
    x_test = np.resize(x_test, (len(x_test), 75, 75, 3))

    conv_base = MobileNet(weights='imagenet',
                          include_top=False,
                          input_shape=(75, 75, 3))
    model = Sequential()
    model.add(conv_base)
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              validation_data=(x_test, y_test),
              epochs=1,
              batch_size=1024,
              verbose=1)
    loss, acc = model.evaluate(x_test, y_test)
    print('Loss:', loss)
    print('Accuracy:', acc)
Example no. 24
    def MobileNet(self):
        from keras.applications import MobileNet
        model = models.Sequential()
        alpha = 1
        conv_base = MobileNet(include_top=False,
                              weights="imagenet",
                              input_shape=self.input_shape,
                              pooling='max',
                              alpha=alpha)
        print('MobileNet:\n')
        #conv_base.summary()
        '''
        alpha: controls the network width:
                  if alpha < 1, the number of filters in each layer is reduced proportionally
                  if alpha > 1, the number of filters in each layer is increased proportionally
                  if alpha = 1, the default number of filters is used
        '''
        model.add(conv_base)
        model.add(layers.Reshape((1, 1, int(1024 * alpha))))
        model.add(layers.Dropout(1e-3))  # matches the default parameter dropout=1e-3 in keras.applications.MobileNet()
        model.add(
            layers.Conv2D(len(self.labels), (1, 1),
                          padding='same',
                          name='conv_preds'))
        model.add(layers.Activation('softmax', name='act_softmax'))
        model.add(layers.Reshape((len(self.labels), ), name='reshape_2'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=self.optim,
                      metrics=['acc'])
        return model
Example no. 25
def build_mobile_model():
    # use the MobileNet base model; part of it is frozen below for training
    base_model = MobileNet(weights='imagenet', include_top=False)  # discards the final 1000-neuron classification layer

    # add some layers for training on top of base model
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)  # dense layers let the model learn more complex functions for better classification
    x = Dense(1024, activation='relu')(x)
    x = Dense(512, activation='relu')(x)  # dense layer 2
    preds = Dense(3, activation='softmax')(x)  # final layer with softmax activation

    model = Model(inputs=base_model.input, outputs=preds)
    # check the model architect
    for i, layer in enumerate(model.layers):
        print(i, layer.name)

    # set the first 20 layers of the network to be non-trainable
    for layer in model.layers[:20]:
        layer.trainable = False
    for layer in model.layers[20:]:
        layer.trainable = True

    return model
Example no. 26
    def build(self) -> Model:
        model = MobileNet(include_top=True,
                          weights=None,
                          input_shape=(self.width, self.height, self.channels),
                          classes=2)

        return model
Example no. 27
def load_mobilenet(width, height, classes_num):
    with tf.device('/cpu:0'):
        model = MobileNet(weights=None,
                          input_shape=(width, height, 3),
                          classes=classes_num)

    return model
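Building on /cpu:0 is the usual prelude to multi-GPU replication in this generation of Keras; a sketch (multi_gpu_model lived in keras.utils in those releases; a multi-GPU machine is assumed):

model = load_mobilenet(224, 224, classes_num=10)
# parallel_model = keras.utils.multi_gpu_model(model, gpus=2)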
Example no. 28
def build_model():
    return Sequential([
        drop_n_and_freeze(0, MobileNet(include_top=False, input_shape=(128, 128, 3))),
        Flatten(),
        Dense(256, activation='relu'),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid')])
Example no. 29
    def __call__(self):
        model = MobileNet(weights='imagenet',
                          include_top=False,
                          input_shape=(self.img_size, self.img_size, 3))
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dropout(0.15)(x)
        x = Dense(1024, activation='relu')(x)
        x = Dropout(0.15)(x)
        x = Dense(1024, activation='relu')(x)
        x = Dropout(0.15)(x)
        age_preds = Dense(7, activation='softmax', name="pred_age")(x)
        gender_preds = Dense(2, activation='softmax', name="pred_gender")(x)
        model = Model(inputs=model.input, outputs=[gender_preds, age_preds])
        model.load_weights(self.weights_file)

        return model
def mobilenet_16s(train_encoder=True,
                  final_layer_activation='sigmoid',
                  prep=True):
    '''
        Creates the model and loads pretrained preprocessing weights.
    '''
    net = MobileNet(include_top=False, weights=None)
    if prep:
        net.load_weights(os.path.join('.', 'keras_preprocessing_weights.h5'),
                         by_name=True)
    else:
        net.load_weights(os.path.join('.', 'wences_preprocessing_weights.h5'),
                         by_name=True)

    for layer in net.layers:
        layer.trainable = train_encoder

    #build decoder
    predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output)
    deconv2 = Conv2DTranspose(filters=1,
                              kernel_size=4,
                              strides=2,
                              padding='same',
                              use_bias=False)(predict)
    pred_conv_pw_11_relu = Conv2D(filters=1, kernel_size=1, strides=1)(
        net.get_layer('conv_pw_11_relu').output)
    fuse1 = Add()([deconv2, pred_conv_pw_11_relu])
    deconv16 = Conv2DTranspose(filters=1,
                               kernel_size=32,
                               strides=16,
                               padding='same',
                               use_bias=False,
                               activation=final_layer_activation)(fuse1)

    return Model(inputs=net.input, outputs=deconv16)
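A hedged usage sketch for the 16-stride FCN above; it assumes the referenced weight file is present and that the input sides are multiples of 32 so the skip-connection shapes align:

import numpy as np

seg_model = mobilenet_16s(train_encoder=False, prep=True)
img = np.random.rand(1, 224, 224, 3).astype('float32')
mask = seg_model.predict(img)  # (1, 224, 224, 1) sigmoid mask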