def build_mobilenet(config, num_classes):
    log.info("Building MobileNet")

    # CONSTANTS
    if config.load_cifar:
        IMAGE_SHAPE = (32, 32, 3)
    else:
        IMAGE_SHAPE = (224, 224, 3)

    # MobileNet base pretrained on ImageNet; the classification top is dropped,
    # so the `classes` argument is not needed here.
    feature_extractor_layer = MobileNet(
        include_top=False,
        weights="imagenet",
        input_shape=IMAGE_SHAPE,
    )
    feature_extractor_layer.trainable = False

    # Add a classification layer: a dense layer with num_classes output nodes
    classification_layer = tf.keras.layers.Dense(num_classes)

    # Build the classifier
    classifier = tf.keras.Sequential()
    classifier.add(feature_extractor_layer)
    classifier.add(tf.keras.layers.GlobalAveragePooling2D())
    if config.with_dropout:
        classifier.add(Dropout(config.dropout_rate))
    classifier.add(classification_layer)
    classifier.summary()
    return classifier
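# A minimal usage sketch, not part of the original module: compiling and training the
# classifier returned by build_mobilenet(). The final Dense(num_classes) layer has no
# activation, so the outputs are logits and the loss is created with from_logits=True.
# `config`, `train_ds`, and `val_ds` below are hypothetical placeholders.
import tensorflow as tf

classifier = build_mobilenet(config, num_classes=10)

classifier.compile(
    optimizer=tf.keras.optimizers.Adam(1e-3),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

# train_ds / val_ds are assumed to yield (image, integer-label) batches at IMAGE_SHAPE resolution
history = classifier.fit(train_ds, validation_data=val_ds, epochs=10)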
def get_model():
    # Load the MobileNet base pretrained on ImageNet and discard the 1000-class top layer
    base_model = MobileNet(weights="imagenet", input_shape=(224, 224, 3), include_top=False)
    base_model.trainable = False

    inputs = keras.Input(shape=(224, 224, 3))
    x = base_model(inputs, training=False)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
    outputs = keras.layers.Dense(6)(x)
    model = keras.Model(inputs, outputs)
    return model
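# A hedged usage sketch, not from the original snippet: compiling the model returned by
# get_model(), assuming `keras` here is tf.keras (as the base_model(inputs, training=False)
# call pattern suggests). Dense(6) has no activation, so the loss uses from_logits=True.
# `train_ds` is a hypothetical tf.data dataset of (image, integer-label) batches.
model = get_model()
model.compile(
    optimizer=keras.optimizers.Adam(1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
model.fit(train_ds, epochs=5)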
img_fire = '/content/drive/My Drive/Python/nasa/fire_set/train/'

x = image.img_to_array(img)  # convert a previously loaded PIL image (`img`) to a numpy array

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
model.summary()

# Freeze the convolutional base, then recompile so the change takes effect
conv_base.trainable = False
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

history = model.fit_generator(
    train_generator,
    steps_per_epoch=20,
    epochs=5,
    validation_data=validation_generator,
    validation_steps=10)
# model.save('/content/drive/My Drive/Python/keras_st/firepredict_three.h5')
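# The train_generator / validation_generator referenced above are not defined in this snippet.
# A minimal sketch of how they might be built with flow_from_directory: only the training path
# (img_fire) comes from the snippet; the validation path, target size, and batch size are
# hypothetical assumptions.
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    img_fire,                     # '/content/drive/My Drive/Python/nasa/fire_set/train/'
    target_size=(224, 224),       # assumed; must match conv_base's input_shape
    batch_size=32,
    class_mode='binary')          # binary labels to match the sigmoid output

validation_generator = val_datagen.flow_from_directory(
    '/content/drive/My Drive/Python/nasa/fire_set/validation/',  # hypothetical path
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary')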
from keras import callbacks

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(16, activation='softmax'))
model.summary()

# Before compiling and training the model, it is very important to freeze the convolutional base.
# Freezing a layer (or set of layers) means preventing its weights from being updated during training.
print('This is the number of trainable weights before freezing the conv base:',
      len(model.trainable_weights))
conv_base.trainable = False
print('This is the number of trainable weights after freezing the conv base:',
      len(model.trainable_weights))

# Training the model end to end with a frozen convolutional base
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
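# The snippet above stops after the augmented training generator is configured. A hedged sketch
# of the remaining steps for training end to end with the frozen base: the directory paths,
# target size, batch size, and step counts below are hypothetical placeholders, and the loss
# matches the 16-way softmax output defined above.
test_datagen = ImageDataGenerator(rescale=1. / 255)  # validation data is not augmented

train_generator = train_datagen.flow_from_directory(
    'data/train',                  # hypothetical directory
    target_size=(150, 150),        # assumed; must match conv_base's input size
    batch_size=20,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    'data/validation',             # hypothetical directory
    target_size=(150, 150),
    batch_size=20,
    class_mode='categorical')

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])

history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator,
                              validation_steps=50)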
def mobilenet_model(init='glorot_uniform', activation='relu', dropout=0.5, regularizer='l2-0.01'):
    '''
    Create an instance of the MobileNet-based model.

    Parameters
    ----------
    init : str
        Weights initialization strategy.
    activation : str
        Activation function to use.
    dropout : float
        Ratio of weights to turn off before the final activation function.
    regularizer : str, "reg_type-reg_value"
        Type and value of regularization to use, reg_type = l2 or l1.

    Returns
    -------
    model : Sequential()
        The model object.
    '''
    reg_type, reg_value = regularizer.split('-')
    if reg_type == 'l2':
        regularizer = regularizers.l2(float(reg_value))
    else:
        regularizer = regularizers.l1(float(reg_value))

    myConv2D = functools.partial(layers.Conv2D,
                                 kernel_initializer=init,
                                 kernel_regularizer=regularizer)

    mobnet_base = MobileNet(weights='imagenet', include_top=False, input_shape=(160, 160, 3))

    # MobileNet was trained on ImageNet, which doesn't contain many food images, so we would
    # like to drop the 20 deepest layers so that it only extracts low-level features
    # (edges, basic shapes, etc.).
    # for _ in range(20): pop(mobnet_base)
    # NOTE: the above does not work -- it raises a KeyError on the final layer when saving the model.

    # Freeze the base
    mobnet_base.trainable = False

    # Create the complete network by appending randomly initialized layers
    model = Sequential()
    model.add(mobnet_base)
    model.add(myConv2D(128, (3, 3)))
    model.add(BatchNormalization())
    model.add(layers.Activation(activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(256,
                           activation=activation,
                           kernel_initializer=init,
                           kernel_regularizer=regularizer))
    model.add(layers.Dense(1, activation='sigmoid'))

    logger.info('''
    Created mobilenet model with params:
    init = {init}
    activation = {activation}
    dropout = {dropout}
    architecture = {architecture}
    '''.format(init=init, activation=activation, dropout=dropout, architecture=model.to_json()))
    return model
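# A hedged alternative to the pop() loop that fails above: instead of popping layers off the
# functional MobileNet graph, rebuild the base as a new Model that ends at an earlier layer.
# The cut-point layer name 'conv_pw_6_relu' is only an example, not a value from the source,
# and this sketch reuses the MobileNet import already present in the module.
from keras.models import Model

full_base = MobileNet(weights='imagenet', include_top=False, input_shape=(160, 160, 3))
cut_layer = full_base.get_layer('conv_pw_6_relu')   # choose how deep a base to keep
mobnet_base = Model(inputs=full_base.input, outputs=cut_layer.output)
mobnet_base.trainable = False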
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

EPOCHS = 30

conv_base = MobileNet(weights='imagenet', include_top=False, input_shape=(128, 128, 3))

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(2, activation='sigmoid'))

# Load the weights saved from the feature-extraction stage, then unfreeze the base for fine-tuning
path = df.output_model + '/mobilenet_use.h5'
model.load_weights(path)
conv_base.trainable = True
model.summary()

model.compile(optimizer=optimizers.Adam(lr=1e-5),
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit_generator(df.train_flow,
                              steps_per_epoch=df.image_numbers // df.BATH_SIZE,
                              epochs=EPOCHS,
                              verbose=1,
                              validation_data=df.val_flow,
                              validation_steps=df.BATH_SIZE,
                              callbacks=[TensorBoard(log_dir='log_dir2/2')])
model.save(df.output_model + '/mobilenet_fine_tune.h5')
import pickle

import keras
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import MobileNet
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

# Load the labels and training images saved as pickles
pickle_in = open("/content/drive/My Drive/labelfile_4.pickle", "rb")
example_dict = pickle.load(pickle_in)
y_new = example_dict

pickle_in = open('/content/drive/My Drive/trainfile_4.pickle', "rb")
X_new = pickle.load(pickle_in)

# MobileNet base pretrained on ImageNet, without the classification top
vggmodel = MobileNet(weights='imagenet', include_top=False)
vggmodel.summary()
vggmodel.trainable = False

x = vggmodel.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dense(1024, activation='relu')(x)
x = Dense(512, activation='relu')(x)
preds = Dense(3, activation='softmax')(x)
model_final = Model(inputs=vggmodel.input, outputs=preds)

from keras.optimizers import Adam
opt = Adam(lr=0.0001)
model_final.compile(loss=keras.losses.categorical_crossentropy,
                    optimizer=opt)
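# The snippet above is cut off after compile(). A hedged sketch of how the imported
# LabelBinarizer and train_test_split might be used to finish the pipeline, assuming y_new is
# an array-like of class labels and X_new an array of images; the test-split ratio, batch size,
# and epoch count are assumptions, not values from the source.
lb = LabelBinarizer()
y_onehot = lb.fit_transform(y_new)          # one-hot labels for the 3-class softmax

X_train, X_test, y_train, y_test = train_test_split(
    X_new, y_onehot, test_size=0.2, random_state=42)

model_final.fit(X_train, y_train,
                batch_size=32,
                epochs=10,
                validation_data=(X_test, y_test))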