def init():
    # Port pretrained Keras MobileNet weights into the TF1 graph defined by inference().
    network = MobileNet(alpha=1.0)
    params = network.get_weights()
    graph = tf.Graph()
    with graph.as_default():
        images = np.random.rand(1, 224, 224, 3)
        inference(images, False)
        model_checkpoint_path = 'log/model_dump/model.ckpt'
        var_list = tf.get_collection('params')
        assert len(var_list) == len(params)
        saver = tf.train.Saver(var_list)
        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(len(var_list)):
                # Keras depthwise kernels are (H, W, in_channels, depth_multiplier);
                # swap the last two axes to match the graph's variable layout.
                if 'depthwise' in var_list[i].name and len(params[i].shape) == 4:
                    params[i] = np.transpose(params[i], (0, 1, 3, 2))
                # Promote 2-D dense kernels to 1x1 convolution kernels.
                if len(params[i].shape) == 2:
                    params[i] = np.expand_dims(params[i], 0)
                    params[i] = np.expand_dims(params[i], 0)
                print(var_list[i].name, var_list[i].shape, params[i].shape)
                sess.run(tf.assign(var_list[i], params[i]))
            saver.save(sess, model_checkpoint_path,
                       write_meta_graph=False, write_state=False)
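# A minimal restore-side sketch (an assumption, not part of the original code):
# it rebuilds the same inference() graph and 'params' collection used by init()
# and loads the checkpoint that init() wrote.
def restore_from_checkpoint():
    graph = tf.Graph()
    with graph.as_default():
        images = np.random.rand(1, 224, 224, 3)
        inference(images, False)
        var_list = tf.get_collection('params')
        saver = tf.train.Saver(var_list)
        with tf.Session(graph=graph) as sess:
            saver.restore(sess, 'log/model_dump/model.ckpt')
            # Variables in the 'params' collection now hold the ported weights.
            print(sess.run(var_list[0]).shape)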
def load_model(self, name_model=''):
    """Loads the model indicated by name_model.

    Args:
        name_model (str, optional): name of the model to load. Defaults to ''.
    """
    if name_model != '':
        if name_model in self.name_model:
            if name_model == 'VGG16':
                self.MODEL = VGG16(weights='imagenet')
                self.fonction_preprocessing = prepro_vg116
            elif name_model == "mobile_net":
                self.MODEL = MobileNet()
                self.fonction_preprocessing = prepro_mobile_net
            elif name_model == "efficient_net":
                self.MODEL = EfficientNetB0()
                self.fonction_preprocessing = prepro_efficient_net
            elif name_model == "efficient_netB3":
                self.MODEL = EfficientNetB3()
                self.fonction_preprocessing = prepro_efficient_net
        else:
            raise Exception(f"name_model not found in {self.name_model}")
    else:
        # Default: VGG16 with its matching preprocessing function.
        self.MODEL = VGG16(weights='imagenet')
        self.fonction_preprocessing = prepro_vg116
def build_mobilenet(config, num_classes):
    log.info("Building MobileNet")

    # Input shape depends on the dataset.
    if config.load_cifar:
        IMAGE_SHAPE = (32, 32, 3)
    else:
        IMAGE_SHAPE = (224, 224, 3)

    feature_extractor_layer = MobileNet(
        include_top=False,
        weights="imagenet",
        input_shape=IMAGE_SHAPE,
        classes=num_classes,
    )
    feature_extractor_layer.trainable = False

    # Classification head: a dense layer with num_classes output nodes.
    classification_layer = tf.keras.layers.Dense(num_classes)

    # Build the classifier.
    classifier = tf.keras.Sequential()
    classifier.add(feature_extractor_layer)
    classifier.add(tf.keras.layers.GlobalAveragePooling2D())
    if config.with_dropout:
        classifier.add(Dropout(config.dropout_rate))
    classifier.add(classification_layer)
    classifier.summary()
    return classifier
def mobilenet_32s(train_encoder=True, final_layer_activation='sigmoid', prep=True):
    '''Creates the model object and loads pretrained encoder weights.'''
    net = MobileNet(include_top=False, weights=None)
    if prep:
        net.load_weights(os.path.join('.', 'keras_preprocessing_weights.h5'),
                         by_name=True)
    else:
        net.load_weights(os.path.join('.', 'wences_preprocessing_weights.h5'),
                         by_name=True)
    for layer in net.layers:
        layer.trainable = train_encoder

    # Build the decoder: a 1x1 prediction conv followed by a 32x upsampling transpose conv.
    predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output)
    deconv32 = Conv2DTranspose(filters=1,
                               kernel_size=64,
                               strides=32,
                               padding='same',
                               use_bias=False,
                               activation=final_layer_activation)(predict)
    return Model(inputs=net.input, outputs=deconv32)
def get_model():
    from keras.applications import MobileNet
    mobilenet = MobileNet(include_top=True,
                          weights='imagenet',
                          input_shape=(224, 224, 3),
                          classes=1000)
    inputs = {mobilenet.input_names[0]: ((1, 224, 224, 3), "float32")}
    mod, params = _get_keras_model(mobilenet, inputs)
    return mod, params, inputs
def build_model(dim_img=(224, 224), n_channels=3):
    input_shape = dim_img + (n_channels, )

    # Import the base model, discarding the top (output) layers.
    base_model = MobileNet(weights=None, include_top=False, input_shape=input_shape)

    # Add our own layers at the end of the base model.
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(100, activation='relu')(x)  # optionally: activity_regularizer=regularizers.l2(0.001)
    x = Dropout(0.35)(x)
    x = Dense(85, activation='relu')(x)   # optionally: activity_regularizer=regularizers.l2(0.001)
    # x = Dense(500, activation='relu')(x)
    # x = Dense(100, activation='relu')(x)  # TODO
    preds = Dense(6, activation='softmax')(x)

    # Create the model from this architecture.
    model = Model(inputs=base_model.input, outputs=preds)

    # Save an image of the model architecture:
    # plot_model(model, show_shapes=True, to_file='data/img_models/CWT_CNN_MobileNet.png')
    return model
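# A short usage sketch; build_model() returns an uncompiled model, so the
# optimizer, loss, and metrics below are assumptions rather than part of the
# original code.
model = build_model(dim_img=(224, 224), n_channels=3)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the 6-way softmax output
              metrics=['accuracy'])
model.summary()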
def mobilenet_prediction(model_path, query_path):
    model = MobileNet(weights='imagenet', include_top=False)

    # Extract features of the query image.
    imgq = image.load_img(query_path, target_size=(224, 224))
    img_dataq = image.img_to_array(imgq)
    img_dataq = np.expand_dims(img_dataq, axis=0)
    img_dataq = preprocess_input(img_dataq)
    mnet_feature_query = model.predict(img_dataq)
    mnet_feature_np_query = np.array(mnet_feature_query)
    mnet_feature_np_query = mnet_feature_np_query.flatten()

    # Classify the flattened feature vector with the saved classifier.
    listOfInput = [mnet_feature_np_query]
    loaded_model = load(model_path)
    probs = loaded_model.predict_proba(listOfInput)[:, :]
    print("probs", probs)
    # loaded_model.predict(listOfInput)
    # probs = model.predict_proba(listOfInput)

    # Return the top-3 (probability, class) pairs.
    category = []
    classes = loaded_model.classes_
    for index in range(len(classes)):
        category.append((probs[0][index], classes[index]))
    final_category = sorted(category, key=lambda x: x[0], reverse=True)
    return final_category[:3]
def load_mobilenet():
    """Loads MobileNet and returns a feature extractor ending at conv_pw_13_relu."""
    print("Loading the MobileNet model...")
    mobilenet = MobileNet(alpha=0.25)
    print("Model Loaded.")
    layer = mobilenet.get_layer('conv_pw_13_relu')
    return keras.Model(inputs=mobilenet.inputs, outputs=layer.output)
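# A minimal usage sketch for the feature extractor above; the random array is a
# stand-in for a preprocessed 224x224 RGB image.
feature_extractor = load_mobilenet()
dummy_image = np.random.rand(1, 224, 224, 3).astype('float32')
features = feature_extractor.predict(dummy_image)
print(features.shape)  # (1, 7, 7, 256): conv_pw_13_relu output for alpha=0.25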
def MobileNet(self):
    model = models.Sequential()
    from keras.applications import MobileNet
    alpha = 0.75
    conv_base = MobileNet(include_top=False,
                          weights='imagenet',
                          input_shape=self.input_shape,
                          pooling='max',
                          alpha=alpha)
    print('MobileNet:\n')
    conv_base.summary()
    # alpha controls the width of the network:
    #   alpha < 1 proportionally decreases the number of filters in each layer,
    #   alpha > 1 proportionally increases it,
    #   alpha = 1 uses the default number of filters.
    model.add(conv_base)
    model.add(layers.Reshape((1, 1, int(1024 * alpha))))
    model.add(layers.Dropout(0.5))  # previously 1e-3, but that rate seems too small to help generalization
    model.add(
        layers.Conv2D(len(self.labels), (1, 1), padding='same', name='conv_preds'))
    model.add(layers.Activation('softmax', name='act_softmax'))
    model.add(layers.Reshape((len(self.labels), ), name='reshape_2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=2e-3),
                  metrics=['acc'])
    return model
def create_mobilenet_model(use_weights=False):
    base_model = MobileNet(input_shape=(128, 128, 2), include_top=False, weights=None)
    # The output shape just before the pooling and dense layers is (4, 4, 1024).
    x = base_model.output
    # 4 Conv layers in parallel, each with 2 4x4 filters.
    x = [Conv2D(2, 4, name='conv2d_{}'.format(i))(x) for i in range(1, 5)]
    x = Concatenate(name='concatenate_1')(x)
    x = Flatten(name='flatten_1')(x)
    model = Model(base_model.input, x, name='mobile_homographynet')
    if use_weights:
        weights_name = os.path.basename(MOBILENET_WEIGHTS_PATH)
        weights_path = get_file(
            weights_name,
            MOBILENET_WEIGHTS_PATH,
            cache_subdir='models',
            file_hash='e161aabc5a04ff715a6f5706855a339d598d1216a4a5f45b90b8dbf5f8bcedc3')
        model.load_weights(weights_path)
    return model
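# A brief usage sketch for the homography model; the two-channel input here is
# random data standing in for a stacked pair of grayscale patches.
model = create_mobilenet_model(use_weights=False)
patch_pair = np.random.rand(1, 128, 128, 2).astype('float32')
offsets = model.predict(patch_pair)
print(offsets.shape)  # (1, 8): the four predicted 2-D corner displacements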
def create_base_model(input_shape):
    # Initialize the pre-trained base model for fine-tuning.
    mobile_net_base = MobileNet(include_top=False,
                                weights='imagenet',
                                input_shape=(224, 224, 3))
    # Freeze everything except the last 3 layers for training.
    for layer in mobile_net_base.layers[:-3]:
        layer.trainable = False

    inputs = Input(input_shape)
    # Add the pre-trained model.
    x = mobile_net_base(inputs)
    # Add new convolution layers and fully-connected layers.
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 1024))(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(128, activation='relu')(x)
    base_model = Model(inputs=inputs, outputs=predictions)
    base_model.summary()
    return base_model
def __model__(self):
    """Build and compile the Keras model.

    :return: compiled keras.Model
    """
    # TODO: refactor this method
    mobilenet = MobileNet(weights='imagenet',
                          include_top=False,
                          input_shape=(224, 224, 3))
    init = mobilenet.output
    pool1 = GlobalAveragePooling2D()(init)
    l1 = Dense(1024)(pool1)
    act1 = Activation(activation="relu")(l1)
    drop1 = Dropout(0.2)(act1)
    l2 = Dense(self.number_classes)(drop1)
    output = Activation(activation="softmax")(l2)
    model = Model(inputs=mobilenet.input, outputs=output)

    # Freeze everything except the last 6 layers.
    for layer in model.layers[:-6]:
        layer.trainable = False

    metrics = [
        'accuracy',
        keras_metrics.precision(),
        keras_metrics.recall()
    ]
    model.compile(optimizer='Adam', loss=self.loss, metrics=metrics)
    return model
def mobileNet():
    global x_train, x_test
    print('MobileNet')
    x_train = np.resize(x_train, (len(x_train), 75, 75, 3))
    x_test = np.resize(x_test, (len(x_test), 75, 75, 3))
    conv_base = MobileNet(weights='imagenet',
                          include_top=False,
                          input_shape=(75, 75, 3))
    model = Sequential()
    model.add(conv_base)
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(x_train, y_train,
              validation_data=(x_test, y_test),
              epochs=1,
              batch_size=1024,
              verbose=1)
    loss, acc = model.evaluate(x_test, y_test)
    print('Loss:', loss)
    print('Accuracy:', acc)
def mobilenet_32s(train_encoder=True, final_layer_activation="sigmoid", prep=True): """ This script creates a model object and loads pretrained weights """ net = MobileNet(include_top=False, weights=None) if prep == True: net.load_weights(os.path.join(".", "mn_classification_weights.h5"), by_name=True) else: net.load_weights(os.path.join(".", "test_preprocessing_weights.h5"), by_name=True) for layer in net.layers: layer.trainable = train_encoder # build decoder predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output) deconv32 = Conv2DTranspose( filters=1, kernel_size=64, strides=32, padding="same", use_bias=False, activation=final_layer_activation, )(predict) return Model(inputs=net.input, outputs=deconv32)
def MobileNet(self):
    model = models.Sequential()
    from keras.applications import MobileNet
    alpha = 1
    conv_base = MobileNet(include_top=False,
                          weights="imagenet",
                          input_shape=self.input_shape,
                          pooling='max',
                          alpha=alpha)
    print('MobileNet:\n')
    # conv_base.summary()
    # alpha controls the width of the network:
    #   alpha < 1 proportionally decreases the number of filters in each layer,
    #   alpha > 1 proportionally increases it,
    #   alpha = 1 uses the default number of filters.
    model.add(conv_base)
    model.add(layers.Reshape((1, 1, int(1024 * alpha))))
    model.add(layers.Dropout(1e-3))  # see the default parameter dropout=1e-3 in keras.applications.MobileNet() for more detail
    model.add(
        layers.Conv2D(len(self.labels), (1, 1), padding='same', name='conv_preds'))
    model.add(layers.Activation('softmax', name='act_softmax'))
    model.add(layers.Reshape((len(self.labels), ), name='reshape_2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=self.optim,
                  metrics=['acc'])
    return model
def __call__(self):
    logging.debug("Creating model...")
    inputs = Input(shape=self._input_shape)
    model_mobilenet = MobileNet(input_shape=self._input_shape,
                                alpha=self.alpha,
                                depth_multiplier=1,
                                dropout=1e-3,
                                include_top=False,
                                weights=self.weights,
                                input_tensor=None,
                                pooling=None)
    x = model_mobilenet(inputs)
    feat_a = GlobalAveragePooling2D()(x)
    feat_a = Dropout(0.5)(feat_a)
    feat_a = Dense(self.FC_LAYER_SIZE, activation="relu")(feat_a)
    pred_g_softmax = Dense(2, activation='softmax', name='gender')(feat_a)
    pred_a_softmax = Dense(self.num_neu, activation='softmax', name='age')(feat_a)
    model = Model(inputs=inputs, outputs=[pred_g_softmax, pred_a_softmax])
    return model
def create_model(self):
    inputs = Input(shape=self.dim, name='input')
    model_mobilenet = MobileNet(input_shape=self.dim,
                                alpha=1,
                                depth_multiplier=1,
                                dropout=self.dropout_global,
                                include_top=False,
                                weights=self.w,
                                input_tensor=None)
    x = model_mobilenet(inputs)
    x = SeparableConv2D(filters=128,
                        kernel_size=(7, 7),
                        activation='relu',
                        padding='same')(x)
    x = Flatten()(x)
    x = BatchNormalization()(x)
    x = Dense(1024, activation='relu', kernel_regularizer=l2(self.regulizer))(x)
    x = Dropout(self.dropout)(x)
    z = Dense(self.classes, activation='tanh')(x)
    model = Model(inputs=inputs, outputs=z)
    adam = Adam(lr=self.eta)
    model.compile(optimizer=adam, loss=l1_loss, metrics=['mse', 'mae'])
    print(model.summary())
    return model
def load_mobilenet(width, height, classes_num):
    with tf.device('/cpu:0'):
        model = MobileNet(weights=None,
                          input_shape=(width, height, 3),
                          classes=classes_num)
    return model
def create_model():
    # Import the MobileNet model and discard the top layers.
    base_model = MobileNet(weights='imagenet', include_top=False)
    x = base_model.output

    # Attach a new head to learn on: dense layers let the model learn more
    # complex functions and classify with better results.
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)  # dense layer 1
    x = Dense(1024, activation='relu')(x)  # dense layer 2
    x = Dense(512, activation='relu')(x)   # dense layer 3
    preds = Dense(2, activation='softmax')(x)  # final layer with softmax activation

    # Specify inputs and outputs: a model built on this architecture.
    model = Model(inputs=base_model.input, outputs=preds)

    # Freeze the first 87 layers (indices 0-86) of MobileNet.
    for layer in model.layers[:87]:
        layer.trainable = False

    # Print layer architecture and trainable flags for inspection.
    for i, layer in enumerate(model.layers):
        print(i, layer.name, layer.trainable)

    # Select optimizer, loss function, and metric.
    model.compile(optimizer='Adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def __init__(self):
    self.input_size = 128
    weights_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'weight/mobilenetv1/mobilenet_5_0_128_tf_no_top.h5')
    base = MobileNet(input_shape=(128, 128, 3),
                     alpha=0.5,
                     include_top=False,
                     weights=weights_path)
    top_layer = GlobalAveragePooling2D()(base.output)
    gender_layer = Dense(2, activation='softmax', name='gender_prediction')(top_layer)
    age_layer = Dense(101, activation='softmax', name='age_prediction')(top_layer)
    super().__init__(inputs=base.input,
                     outputs=[gender_layer, age_layer],
                     name='AgenderNetMobileNetV1')
def create_base_model(input_shape, num_classes):
    # Initialize the pre-trained base model for fine-tuning.
    # Note: the MobileNet input shape is fixed at (224, 224, 3) here; the
    # input_shape argument is not used by this function.
    mobile_net_base = MobileNet(include_top=False,
                                weights='imagenet',
                                input_shape=(224, 224, 3))
    # Freeze everything except the last 6 layers for training.
    for layer in mobile_net_base.layers[:-6]:
        layer.trainable = False

    model = Sequential()
    # Add the pre-trained model.
    model.add(mobile_net_base)
    model.add(GlobalAveragePooling2D())
    model.add(Reshape((1, 1, 1024)))
    model.add(Flatten())

    # Fully connected layers.
    model.add(Dense(256, use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    # Final fully connected prediction layer.
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.summary()
    return model
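# A short usage sketch; the class count, optimizer, and loss below are example
# values and assumptions, since create_base_model() returns an uncompiled model.
model = create_base_model(input_shape=(224, 224, 3), num_classes=5)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])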
def build_mobile_model():
    # Use the MobileNet base model; import it and discard the last
    # 1000-neuron classification layer.
    base_model = MobileNet(weights='imagenet', include_top=False)

    # Add some layers for training on top of the base model. Dense layers let
    # the model learn more complex functions and classify with better results.
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)  # dense layer 1
    x = Dense(1024, activation='relu')(x)  # dense layer 2
    x = Dense(512, activation='relu')(x)   # dense layer 3
    preds = Dense(3, activation='softmax')(x)  # final layer with softmax activation
    model = Model(inputs=base_model.input, outputs=preds)

    # Check the model architecture.
    for i, layer in enumerate(model.layers):
        print(i, layer.name)

    # Set the first 20 layers of the network to be non-trainable.
    for layer in model.layers[:20]:
        layer.trainable = False
    for layer in model.layers[20:]:
        layer.trainable = True
    return model
def build(self) -> Model:
    model = MobileNet(include_top=True,
                      weights=None,
                      input_shape=(self.width, self.height, self.channels),
                      classes=2)
    return model
def build_model():
    return Sequential([
        drop_n_and_freeze(0, MobileNet(include_top=False, input_shape=(128, 128, 3))),
        Flatten(),
        Dense(256, activation='relu'),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid'),
    ])
def __init__(self):
    self.name = 'CNN_App_Keras'
    self.input_shape = (224, 224)
    # self.model = Xception()
    self.model = MobileNet()
    self.class_names = tools_CNN_view.class_names
    return
def get_net():
    if use_mobilenet:
        return MobileNet(include_top=False,
                         weights=None,
                         input_shape=(input_height, input_width, 3))
    else:
        return NASNetMobile(include_top=False,
                            weights='imagenet',
                            input_shape=(input_height, input_width, 3))
def test_validate_keras_mobilenet(self):
    input_tensor = Input(shape=(224, 224, 3))
    model = MobileNet(weights="imagenet", input_tensor=input_tensor)
    file_name = "keras" + model.name + ".pmml"
    pmml_obj = KerasToPmml(model,
                           dataSet="image",
                           predictedClasses=[str(i) for i in range(1000)])
    pmml_obj.export(open(file_name, 'w'), 0)
    self.assertEqual(self.schema.is_valid(file_name), True)
def create_pre_train_model(self):
    model = Sequential()

    # Alternative pretrained backbones (MobileNetV2, ResNet152V2, Xception,
    # VGG19, VGG16, DenseNet201) could be selected here instead, e.g.:
    # if pretrain_name == 'ResNet152V2':
    #     model.add(TimeDistributed(
    #         ResNet152V2(weights='imagenet', include_top=False),
    #         input_shape=(n_sequence, *dim, n_channels)))
    # elif pretrain_name == 'Xception':
    #     model.add(TimeDistributed(
    #         Xception(weights='imagenet', include_top=False),
    #         input_shape=(n_sequence, *dim, n_channels)))
    # elif pretrain_name == 'MobileNetV2':
    #     model.add(TimeDistributed(
    #         MobileNetV2(weights='imagenet', include_top=False, alpha=alpha),
    #         input_shape=(n_sequence, *dim, n_channels)))
    # else:
    #     raise ValueError('pretrain_name is incorrect')

    # Apply MobileNet frame-wise over the input sequence.
    model.add(
        TimeDistributed(
            MobileNet(weights='imagenet', include_top=False),
            input_shape=self.input_shape))
    model.add(TimeDistributed(GlobalAveragePooling2D()))  # or Flatten()
    model.add(CuDNNLSTM(256, return_sequences=False))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(self.nb_classes, activation='softmax'))
    model.summary()
    self.compile(model)
    return model
def mobile(X_train, Y_train):
    conv_base = MobileNet(weights='imagenet',
                          include_top=False,
                          input_shape=(128, 128, 3))
    samples_generator = DataGenerator(X_train, Y_train, **params)
    test_features = conv_base.predict_generator(samples_generator, verbose=True)
    return test_features
def get_microclassifier(mc_model_fns, mc_intermediate_layers, mobilenet_input_shape):
    """Build a microclassifier (mobilenet -> microclassifier)."""
    # Load the pre-trained MobileNet and set all layers to not be trainable.
    print("Loading weights from 224x224 MobileNet...", end=" ", flush=True)
    mobilenet_base_model = MobileNet(input_shape=(224, 224, 3),
                                     include_top=False,
                                     weights='imagenet',
                                     input_tensor=None,
                                     pooling=None)
    print("Done.")

    print("Initializing {}x{} MobileNet...".format(*mobilenet_input_shape),
          end=" ", flush=True)
    mobilenet_reshaped_model = MobileNet(input_shape=mobilenet_input_shape,
                                         include_top=False,
                                         weights=None,
                                         input_tensor=None,
                                         pooling=None)

    print("Copying weights from 224x224 MobileNet...", end=" ", flush=True)
    for reshaped_layer, layer in zip(mobilenet_reshaped_model.layers,
                                     mobilenet_base_model.layers):
        reshaped_layer.set_weights(layer.get_weights())

    print("Setting all MobileNet layers to be non-trainable...", flush=True)
    for layer in mobilenet_reshaped_model.layers:
        layer.trainable = False
    print("Done", flush=True)

    # Infer each micro-classifier's input shape from the reshaped model and
    # attach it to the requested intermediate layer.
    full_mc_models = []
    for mc_model_fn, mc_intermediate_layer in zip(mc_model_fns, mc_intermediate_layers):
        mc_input_shape = mobilenet_reshaped_model.get_layer(
            mc_intermediate_layer).output.shape[1:]
        mc_input_shape = tuple([int(dim) for dim in mc_input_shape])
        full_mc_models.append(
            mc_model_fn(mc_input_shape)(mobilenet_reshaped_model.get_layer(
                mc_intermediate_layer).output))

    full_model = Model(inputs=mobilenet_reshaped_model.input,
                       outputs=full_mc_models[0])
    full_model.summary()
    for layer in full_model.layers:
        if layer.trainable:
            print("Training: {}".format(layer.name))
    return full_model
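# A hypothetical usage sketch for get_microclassifier(); tiny_head and the
# layer/shape choices below are assumptions for illustration, not part of the
# original code.
from keras.layers import Input, Dense, GlobalAveragePooling2D
from keras.models import Model

def tiny_head(input_shape):
    # A small binary classifier applied to an intermediate MobileNet feature map.
    head_input = Input(shape=input_shape)
    pooled = GlobalAveragePooling2D()(head_input)
    head_output = Dense(1, activation='sigmoid')(pooled)
    return Model(inputs=head_input, outputs=head_output)

micro_model = get_microclassifier(
    mc_model_fns=[tiny_head],
    mc_intermediate_layers=['conv_pw_11_relu'],
    mobilenet_input_shape=(160, 160, 3))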