def mobilenet_retinanet(num_classes, backbone='mobilenet224_1.0', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a MobileNetV2 backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Backbone name of the form 'mobilenet<size>_<alpha>'
            (e.g. 'mobilenet224_1.0'); only the alpha suffix after '_' is used.
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in
            retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a MobileNetV2 backbone.
    """
    # the width multiplier is encoded after the underscore, e.g. 'mobilenet224_1.0' -> 1.0
    alpha = float(backbone.split('_')[1])

    # choose default input
    if inputs is None:
        inputs = keras.layers.Input((None, None, 3))

    backbone = mobilenetv2.MobileNetV2(input_tensor=inputs, alpha=alpha, include_top=False, pooling=None, weights=None)

    # create the full model: expose the three feature-pyramid source layers
    layer_names = ['block_6_expand_relu', 'block_13_expand_relu', 'out_relu']
    layer_outputs = [backbone.get_layer(name).output for name in layer_names]
    backbone = keras.models.Model(inputs=inputs, outputs=layer_outputs, name=backbone.name)

    # invoke modifier if given -- previously commented out, which silently
    # ignored the documented `modifier` argument
    if modifier:
        backbone = modifier(backbone)

    # freeze every batch-normalization layer of the backbone
    for layer in backbone.layers:
        if re.search("bn|BN", layer.name):
            layer.trainable = False

    return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=backbone.outputs, **kwargs)
def mobile2(input_image, **kwargs):
    """Build a headless MobileNetV2 on `input_image` and return the outputs of
    five early padding stages, each wrapped in an extra (1, 0)/(1, 0) zero pad."""
    from keras.applications import mobilenetv2

    model = mobilenetv2.MobileNetV2(input_tensor=input_image, include_top=False, **kwargs)

    stage_names = [
        'Conv1_pad', 'block_1_pad', 'block_3_pad', 'block_6_pad', 'block_13_pad'
    ]
    padded_outputs = []
    for stage in stage_names:
        stage_output = model.get_layer(name=stage).output
        padded_outputs.append(KL.ZeroPadding2D(((1, 0), (1, 0)))(stage_output))
    return padded_outputs
def get_normal_mn2():
    """Build a YOLO-style detection head on a truncated MobileNetV2 backbone.

    Returns
        Keras Model taking [input_image, true_boxes] and producing a
        (GRID_H, GRID_W, N_BOX, 4 + 1 + CLASS) prediction tensor.
    """
    # NOTE(review): relies on module-level globals input_image, true_boxes,
    # N_BOX, CLASS, GRID_H, GRID_W -- confirm they are defined before calling.
    model = mobilenetv2.MobileNetV2(input_shape=(224, 224, 3), include_top=False)
    # truncate the network: use the *input* of the third-to-last layer as features
    model = Model(inputs=model.input, outputs=model.layers[-3].input)

    x = model(input_image)
    x = Conv2D(N_BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), padding='same', name='conv_23')(x)
    output = Reshape((GRID_H, GRID_W, N_BOX, 4 + 1 + CLASS))(x)

    # small hack to allow true_boxes to be registered when Keras builds the model
    # for more information: https://github.com/fchollet/keras/issues/2790
    output = Lambda(lambda args: args[0])([output, true_boxes])

    model = Model([input_image, true_boxes], output)
    # summary() prints itself and returns None; wrapping it in print() emitted a spurious "None"
    model.summary()
    return model
def mobile_net(self, percent2retrain):
    """Return a MobileNetV2-based classifier with a dense sigmoid head.

    Args:
        percent2retrain: fraction (0..1] of backbone layers, counted from the
            end, left trainable; the rest are frozen.
    """
    mobile_net_model = mobilenetv2.MobileNetV2(input_shape=self.input_dim, weights='imagenet', include_top=False)

    # freeze base layers: keep only the last `percent2retrain` fraction trainable
    if percent2retrain < 1:
        n_layers = len(mobile_net_model.layers)
        n_retrain = int(n_layers * percent2retrain)
        # use an explicit endpoint: the old `[:-n_retrain]` slice froze *no*
        # layers when n_retrain truncated to 0 ([:-0] == [:0])
        for layer in mobile_net_model.layers[:n_layers - n_retrain]:
            layer.trainable = False

    # add classification top layer
    model = Sequential()
    model.add(mobile_net_model)
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.n_classes, activation='sigmoid'))
    return model
def build_index(image_paths):
    """Run each image through an ImageNet MobileNetV2 classifier and pickle
    the resulting (path, prediction) pairs to 'set_idx.pkl'."""
    mv2 = keras_app.MobileNetV2(input_shape=None, alpha=1.0, depth_multiplier=1,
                                include_top=True, weights='imagenet',
                                input_tensor=None, pooling=None, classes=1000)

    index_entries = []
    for img_path in image_paths:
        loaded = image.load_img(img_path, target_size=(224, 224))
        processed_img = mnetv2_input_from_image(loaded)
        index_entries.append((img_path, mv2.predict(processed_img)))

    with open('set_idx.pkl', 'wb') as f:
        pickle.dump(index_entries, f)
def get_all_nets(network_name, include_top=True):
    """Return a pretrained ImageNet model selected by name.

    Args:
        network_name: one of "ResNet50", "MobileNetV2", "VGG19", "SqueezeNet".
        include_top: whether to keep the classification head.

    Raises:
        ValueError: for an unrecognised network name (previously an unknown
            name fell through and crashed with UnboundLocalError at `return`).
    """
    if network_name == "ResNet50":
        model = resnet50.ResNet50(weights='imagenet', include_top=include_top, input_shape=(224, 224, 3))
    elif network_name == "MobileNetV2":
        model = mobilenetv2.MobileNetV2(weights='imagenet', include_top=include_top, input_shape=(224, 224, 3))
    elif network_name == "VGG19":
        model = vgg19.VGG19(weights='imagenet', include_top=include_top)
    elif network_name == "SqueezeNet":
        model = SqueezeNet(weights='imagenet', include_top=include_top)
    else:
        raise ValueError("unknown network name: {}".format(network_name))
    return model
import numpy as np
import cv2
import hnswlib
import math
from os import listdir, path
from prev_approaches.database import PatchGraphDatabase
import time

# patch/window extraction parameters
patch_size = 96
window_size = 448
channel_count = 3

images_directory = "/Users/user/Desktop/household_images"

# headless MobileNetV2 used as a per-patch descriptor network
# (96x96 input; the final feature depth matches the index dim below)
feature_net = mobilenetv2.MobileNetV2(weights="imagenet", include_top=False, input_shape=(96, 96, 3))

database = PatchGraphDatabase()

# approximate nearest-neighbour index over 1280-d descriptors (cosine distance)
desc_index = hnswlib.Index(space='cosine', dim=1280)
desc_index.init_index(max_elements=7000000, ef_construction=500, M=32)
desc_index.set_ef(500)

# (filename, full path) pairs for every entry in the images directory
images = [(e, path.join(images_directory, e)) for e in listdir(images_directory)]


def open_and_prepare_image(image_path):
    # load the image as a BGR uint8 array; cv2.imread returns None for unreadable paths
    image = cv2.imread(image_path)
    return image
def main():
    """Train a per-class sign classifier: dense head first, then staged fine-tuning."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("train_class", type=str, choices=classes.keys())
    parser.add_argument("data_dir", type=str)
    parser.add_argument('-e', "--epoch", required=False, type=int, default=64, dest="epoch")
    parser.add_argument('-ef', "--epoch-fine-tune", required=False, type=int, default=200, dest="epoch_fine_tune")
    parser.add_argument('-b', '--batch-size', required=False, default=1024, type=int, dest="batch")
    parser.add_argument('-lr', '--learning-rate', required=False, default=1e-4, type=float, dest="lr")
    parser.add_argument('-decay', '--learning-rate-decay', required=False, default=1e-6, type=float, dest="decay")
    # NOTE(review): argparse `type=bool` does not parse "False" as False -- any
    # non-empty string is truthy; `action="store_true"` is the usual fix, but it
    # would change the CLI, so confirm existing usage first.
    parser.add_argument('-ignore-npz', '--ignore-precomputed-learning-file', required=False, default=False, type=bool, dest="ignore_npz")
    parser.add_argument('-ri', '--use-random-weight-initialisation', required=False, default=False, type=bool, dest="random_init")
    parser.add_argument('-ua', '--unfroze-all-convolution-layer-directly', required=False, default=False, type=bool, dest="unfroze_all")
    parser.add_argument('-m', '--model-name', required=False, default="MobileNetV2", type=str, dest="model_name")
    parser.add_argument('-d', '--dense-layer-size', required=False, nargs="*", default=[], type=int, dest="dense_size")
    parser.add_argument('-is', '--input-size', required=False, default=96, type=int, dest="input_size")
    parser.add_argument('-viz', '--data-visualisation', required=False, default=False, type=bool, dest="data_visualisation")
    args = parser.parse_args()

    batch_size = args.batch
    class_name = args.train_class

    # per-class configuration from the module-level `classes` table
    out_classes = classes[class_name]["signs_classes"]
    rotation_and_flips = classes[class_name]["rotation_and_flips"]
    h_symmetry_classes = classes[class_name]["h_symmetry"]
    try:
        merge_sign_classes = classes[class_name]["merge_sign_classes"]
    except KeyError:
        merge_sign_classes = None

    # class name <-> integer id lookup tables
    mapping = {c: i for i, c in enumerate(out_classes)}
    mapping_id_to_name = {i: c for c, i in mapping.items()}

    os.makedirs(class_name, exist_ok=True)

    x_train, y_train, x_test, y_test = get_data_for_master_class(class_name=class_name, mapping=mapping,
                                                                 mapping_id_to_name=mapping_id_to_name,
                                                                 rotation_and_flips=rotation_and_flips,
                                                                 data_dir=args.data_dir,
                                                                 merge_sign_classes=merge_sign_classes,
                                                                 h_symmetry_classes=h_symmetry_classes,
                                                                 image_size=(args.input_size, args.input_size),
                                                                 ignore_npz=args.ignore_npz,
                                                                 out_classes=out_classes)

    if args.data_visualisation:
        # visualisation mode: no network, raw pixel values
        preprocess_input = lambda x: x
        model = None
    else:
        if args.random_init:
            weights = None
        else:
            weights = 'imagenet'
        if args.model_name == "MobileNetV2":
            preprocess_input = mobilenetv2.preprocess_input
            base_model = mobilenetv2.MobileNetV2(weights=weights, include_top=False,
                                                 input_shape=(args.input_size, args.input_size, 3), pooling='avg')
        elif args.model_name == "InceptionResNetV2":
            preprocess_input = inception_resnet_v2.preprocess_input
            base_model = inception_resnet_v2.InceptionResNetV2(weights=weights, include_top=False,
                                                               input_shape=(args.input_size, args.input_size, 3),
                                                               pooling='avg')
        elif args.model_name == "NASNetLarge":
            preprocess_input = nasnet.preprocess_input
            base_model = nasnet.NASNetLarge(weights=weights, include_top=False,
                                            input_shape=(args.input_size, args.input_size, 3), pooling='avg')
        else:
            raise ValueError("unknown model name {}, should be one of {}".format(args.model_name, ["MobileNetV2", "InceptionResNetV2", "NASNetLarge"]))

        # optional extra dense layers, then the softmax classification head
        predictions = base_model.outputs[0]
        for s in args.dense_size:
            predictions = Dense(s, activation='relu')(predictions)
        predictions = Dense(len(out_classes), activation='softmax')(predictions)
        model = Model(inputs=base_model.input, outputs=predictions)
        # model.summary()
        # blocks = {}
        # for i, layer in enumerate(base_model.layers):
        #     s = layer.name.split('_')
        #     if s[0] == "block":
        #         b = int(s[1])
        #         if b not in blocks:
        #             blocks[b] = [i]
        #         else:
        #             blocks[b].append(i)
        # exit(0)

    # NOTE(review): save_best_only="True" is a truthy *string*, not the boolean
    # True; it behaves the same but should be a bool.
    callbacks = [ModelCheckpoint(filepath="{}/checkpoint.h5".format(class_name), monitor="val_loss", mode='min', verbose=0,
                                 save_best_only="True", save_weights_only=False, period=1),
                 EarlyStopping(monitor='val_acc', mode='max', min_delta=0.001, patience=40, verbose=1,
                               restore_best_weights=True)
                 ]

    x_test = np.stack([preprocess_input(i) for i in x_test])

    datagen = ImageDataGenerator(featurewise_center=False,
                                 featurewise_std_normalization=False,
                                 rotation_range=10,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 brightness_range=(0.5, 1.4),
                                 shear_range=3.0,
                                 zoom_range=(0.7, 1.1),
                                 fill_mode='nearest',
                                 horizontal_flip=False,
                                 vertical_flip=False,
                                 preprocessing_function=preprocess_input)
    datagen.fit(x_train)

    if args.data_visualisation:
        # show augmented samples one at a time (plt.show() blocks until the
        # window is closed); datagen.flow() is an endless generator
        for b in datagen.flow(x_train, y_train, batch_size=1):
            im, im_class = b[0][0], b[1][0]
            im_class = int(np.argmax(im_class))
            plt.imshow(im.astype(np.int))
            plt.title(out_classes[im_class])
            plt.show()
        # guard so visualisation never falls through to training
        return

    if not args.random_init:
        # if the network is not randomly initialized, we first fine tune the last layers
        for layer in base_model.layers:
            layer.trainable = False
        model.compile(optimizer=rmsprop(lr=args.lr, decay=args.decay), loss='categorical_crossentropy',
                      metrics=["accuracy"])
        history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                      steps_per_epoch=ceil(len(x_train) / batch_size),
                                      epochs=args.epoch,
                                      verbose=1,
                                      validation_data=(x_test, y_test),
                                      use_multiprocessing=True,
                                      callbacks=callbacks)
        plot_history(history, "{0}/{1}_{0}_dense_".format(class_name, args.model_name))
        model.save("{0}/{1}_{0}_dense.h5".format(class_name, args.model_name), overwrite=True)

        if not args.unfroze_all:
            # unfroze the 3 last blocks of mobile net
            # NOTE(review): the layer-index cutoffs 113/87 assume MobileNetV2's
            # layer layout -- confirm when using another backbone.
            for layer in model.layers[:113]:
                layer.trainable = False
            for layer in model.layers[113:]:
                layer.trainable = True
            model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay), loss='categorical_crossentropy',
                          metrics=["accuracy"])
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          steps_per_epoch=ceil(len(x_train) / batch_size),
                                          epochs=args.epoch_fine_tune,
                                          verbose=1,
                                          validation_data=(x_test, y_test),
                                          use_multiprocessing=True,
                                          callbacks=callbacks)
            plot_history(history, "{0}/{1}_{0}_fine_tuning_1_".format(class_name, args.model_name))
            model.save("{0}/{1}_{0}_1.h5".format(class_name, args.model_name), overwrite=True)

            # unfroze the 6 last blocks of mobile net
            for layer in model.layers[:87]:
                layer.trainable = False
            for layer in model.layers[87:]:
                layer.trainable = True
            model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay), loss='categorical_crossentropy',
                          metrics=["accuracy"])
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          steps_per_epoch=ceil(len(x_train) / batch_size),
                                          epochs=args.epoch_fine_tune,
                                          verbose=1,
                                          validation_data=(x_test, y_test),
                                          use_multiprocessing=True,
                                          callbacks=callbacks)
            plot_history(history, "{0}/{1}_{0}_fine_tuning_2_".format(class_name, args.model_name))
            model.save("{0}/{1}_{0}_2.h5".format(class_name, args.model_name), overwrite=True)

    # unfroze all model
    for layer in model.layers:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay), loss='categorical_crossentropy',
                  metrics=["accuracy"])
    history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                  steps_per_epoch=ceil(len(x_train) / batch_size),
                                  epochs=args.epoch_fine_tune,
                                  verbose=1,
                                  validation_data=(x_test, y_test),
                                  use_multiprocessing=True,
                                  callbacks=callbacks)
    plot_history(history, "{0}/{1}_{0}_fine_tuning_f_".format(class_name, args.model_name))
    model.save("{0}/{1}_{0}_final.h5".format(class_name, args.model_name), overwrite=True)
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.applications import mobilenetv2
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping

# training configuration
img_width, img_height = 224, 224
train_data_dir = "train"
validation_data_dir = "val"
nb_train_samples = 3200
nb_validation_samples = 800
batch_size = 16
epochs = 20

# headless MobileNetV2 backbone with ImageNet weights
model = mobilenetv2.MobileNetV2(weights="imagenet", include_top=False, input_shape=(img_width, img_height, 3))

# Freeze the layers which you don't want to train. Here I am freezing the first 5 layers.
# NOTE(review): the loop actually freezes *all* backbone layers, not the first
# 5 -- the comment above and the code disagree; confirm intent.
for layer in model.layers:  # model.layers[:x] for freezing first x layers
    layer.trainable = False

# Adding custom Layers: flatten + two ReLU dense layers with dropout, 2-way softmax head
x = model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation="relu")(x)
predictions = Dense(2, activation="softmax")(x)

# creating the final model
    # NOTE(review): this fragment sits inside a larger framework-selection
    # `if` whose header is outside this chunk (the trailing `elif
    # use_framework==MXNET:` pairs with it); indentation reconstructed -- verify.
        if False:
            # legacy branch: MobileNet(v1) VGGFace2 embedding model (disabled)
            from keras.applications.imagenet_utils import preprocess_input
            classes_num=9131 #10575
            sz=192
            net_model = MobileNet(weights=None, include_top=False, input_shape=(sz, sz, 3))
            last_model_layer = net_model.output
            x = GlobalAveragePooling2D()(last_model_layer)
            x = Reshape((1,1,1024), name='reshape_1')(x)
            model = Model(net_model.input, x)
            model.load_weights('models/vgg2_mobilenet.h5')
            out = model.get_layer('reshape_1').output
        else:
            # MobileNetV2 (alpha=1.4, 224x224) with custom fine-tuned weights;
            # the global-average-pooling output is used as the face embedding
            preprocess_input=mobilenetv2.preprocess_input
            model = mobilenetv2.MobileNetV2(alpha=1.4, input_shape=(224, 224, 3), include_top=False, weights=None, pooling='avg')
            #model = mobilenetv2.MobileNetV2(alpha=1.0, input_shape=(192, 192, 3), include_top=False, weights=None, pooling='avg')
            model.load_weights('../DNN_models/my_tf/mobilenet2_alpha=1.4_224_augm_ft_sgd_base.h5')
            out = model.get_layer('global_average_pooling2d_1').output
            print('out=',out)
    else:
        # VGGFace alternative backbone
        from keras_vggface.vggface import VGGFace
        from keras_vggface.utils import preprocess_input
        model_name, layer='vgg16','fc7/relu'
        #model_name, layer='resnet50','avg_pool'
        model = VGGFace(model=model_name) # pooling: None, avg or max
        out = model.get_layer(layer).output
    # wrap whichever backbone was chosen as the feature-extraction model
    cnn_model = Model(model.input, out)
    cnn_model.summary()
elif use_framework==MXNET:
# NOTE(review): this fragment assumes data_06, mode, x_train, y_train,
# img_size and output_size are defined earlier in the file -- confirm.
y_test = np.array(data_06.item().get(mode))

# scale pixel values to [0, 1] and reshape to (N, H, W, 3) / (N, output_size)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (-1, img_size, img_size, 3))
x_test = np.reshape(x_test, (-1, img_size, img_size, 3))
y_train = np.reshape(y_train, (-1, output_size))
y_test = np.reshape(y_test, (-1, output_size))

inputs = Input(shape=(img_size, img_size, 3))

# headless MobileNetV2 backbone (max pooling) with a small regression head
mobilenetv2_model = mobilenetv2.MobileNetV2(input_shape=(img_size, img_size, 3), alpha=1.0, depth_multiplier=1, include_top=False, weights='imagenet', input_tensor=inputs, pooling='max')

net = Dense(128, activation='relu')(mobilenetv2_model.layers[-1].output)
net = Dense(64, activation='relu')(net)
net = Dense(output_size, activation='linear')(net)

model = Model(inputs=inputs, outputs=net)
model.summary()

# training: plain MSE regression with Adam defaults
model.compile(optimizer=keras.optimizers.Adam(), loss='mse')
from keras.models import Sequential, Model
from keras.utils import to_categorical
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
#get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.utils import shuffle
import numpy as np
from keras.models import Model
from keras.applications import mobilenetv2
from keras.applications.mobilenetv2 import preprocess_input

# full ImageNet MobileNetV2; model2 re-exposes the penultimate layer's output
# so it can be used as a feature extractor
model_mobile = mobilenetv2.MobileNetV2(input_shape=(224, 224, 3), include_top=True, weights='imagenet', classes=1000)
# NOTE(review): `input=`/`output=` are the legacy kwarg names -- newer Keras
# releases require `inputs=`/`outputs=`; confirm the pinned Keras version.
model2 = Model(input=model_mobile.input, output=model_mobile.layers[-2].output)
print(model2.output_shape)

# NOTE(review): duplicate imports of to_categorical (also imported at the top)
from keras.utils import to_categorical
from keras.utils import to_categorical


def test_train_dev_split(input_data, output_data, train=0.8, dev=0.1, test=0.1):
    # split arrays into train/dev/test partitions (body continues beyond this chunk)
    #make seed for exact results everything
    #input_data=preprocess_input(input_data)
def __init__(self, include_top=False, pooling=None, n_channels=None, cache_size=int(1e4), model='inception_v3', weights='imagenet', cache_dir=None, n_objects=None): self.include_top = include_top # determines if used for classification or featurization, TODO separate into two classes? self.n_channels = n_channels self.n_objects = n_objects self.pooling = pooling self.failed_urls = set() # NOTE: set cache_dir to None to turn off caching if cache_dir: # create default cache path in the current file dir w/ filename specifying config config = [ f'objects-{NUM_OBJECTS}' if include_top else 'features', str(cache_size), model, pooling if pooling else '', str(n_channels) if n_channels else '' ] config_str = '-'.join([c for c in config if c ]) # filter out empty strings and join w/ - cache_fname = f'imagenet-cache-{config_str}.pkl' self.cache_path = os.path.join(cache_dir, cache_fname) # TODO allow larger cache_size to still load from previous smaller caches else: self.cache_path = None if self.cache_path and os.path.isfile(self.cache_path): self.load_cache() else: self.cache = LRUCache(cache_size) if model == 'xception': self.model = xception.Xception(weights=weights, include_top=include_top, pooling=pooling) self.preprocess = xception.preprocess_input self.target_size = (299, 299) if include_top: self.decode = xception.decode_predictions else: self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 10**2) elif model == 'inception_v3': self.model = inception_v3.InceptionV3(weights=weights, include_top=include_top, pooling=pooling) self.preprocess = inception_v3.preprocess_input self.target_size = (299, 299) if include_top: self.decode = inception_v3.decode_predictions else: self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 8**2) elif model == 'mobilenet_v2': self.model = mobilenetv2.MobileNetV2(weights=weights, include_top=include_top, pooling=pooling) self.preprocess = mobilenetv2.preprocess_input self.target_size = (244, 244) 
if include_top: self.decode = mobilenetv2.decode_predictions else: self.output_dim = (n_channels if n_channels else 1280) * (1 if pooling else 7**2) else: raise Exception('model option not implemented') # NOTE: we force the imagenet model to load in the same scope as the functions using it to avoid tensorflow weirdness self.model.predict(np.zeros((1, *self.target_size, 3))) logging.info('imagenet loaded')
# CLI arguments: argv[3] = batch size, argv[4] = epoch count
batch_size = int(sys.argv[3])
epochs = int(sys.argv[4])

# source activate Tensorflow

### start session
config=tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction=0.98
config.gpu_options.allow_growth = True #avoid getting all available memory in GPU
sess=tf.Session(config=config)

img_width, img_height = 224, 224
inputShape = (img_width,img_height,3)

# headless MobileNetV2 with global average pooling, ImageNet weights
new_model = mobilenetv2.MobileNetV2(alpha=1.0, depth_multiplier=1, include_top=False, weights='imagenet', input_shape=inputShape, pooling='avg')
new_model.summary()

# two-way softmax head on top of the pooled backbone features
x = new_model.output
#x = Dense(224, activation='relu', name='fc2')(output_model)
x = Dense(2, activation='softmax', name='predictions')(x)
model = Model(inputs=new_model.input, outputs=x)

adadelta = Adadelta(lr=0.5, rho=0.95, epsilon=1e-6)
# NOTE(review): MSE loss over a 2-way softmax is unusual -- cross-entropy is
# the conventional choice for classification; confirm intent.
model.compile(loss='mean_squared_error', optimizer=adadelta, metrics=['mean_squared_error','accuracy'])
model.summary()

#Callbacks
# validation images are only rescaled to [0, 1]; no augmentation
test_datagen = image.ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(args["dataset_train"],
                                                    target_size=(image_size, image_size),
                                                    batch_size=batch_size,
                                                    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    args["dataset_val"],
    target_size=(image_size, image_size),
    batch_size=batch_size,
    class_mode='categorical')

# headless MobileNetV2 backbone with ImageNet weights
#pretrained_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(image_size, image_size, 3))
pretrained_model = mobilenetv2.MobileNetV2(input_shape=(image_size, image_size, 3), include_top=False, weights='imagenet')

# Freeze all the layers
#for layer in pretrained_model.layers[:-4]:
#    layer.trainable = False

# Check the trainable status of the individual layers
#for layer in pretrained_model.layers:
#    print(layer, layer.trainable)
#    logging.info(str(layer), str(layer.trainable))

# Create the model
model = models.Sequential()

# Add the pretrained-model convolutional base model
from keras.applications import mobilenetv2
from keras.optimizers import Adadelta
import sys
import numpy as np
import os
from tensorflow.python.client import device_lib

if __name__ == '__main__':
    # usage: <script> <filelist> <train_data_dir> <batch_size>
    filelist = sys.argv[1]
    train_data_dir = sys.argv[2]
    batch_size = int(sys.argv[3])

    inputTensor = Input(shape=(224, 224,3))
    # headless MobileNetV2 with global average pooling -> one descriptor per image
    new_model = mobilenetv2.MobileNetV2(
        alpha=1.0, depth_multiplier=1, include_top=False, input_tensor=inputTensor,
        weights='imagenet', pooling='avg')
    model = Model(inputs=new_model.input, outputs=new_model.output)

    with open(filelist, 'r') as f:
        files = f.readlines()
    curdir = os.getcwd()

    i=0
    # consume the file list in batches of `batch_size`
    # (loop body continues beyond this chunk)
    while i<len(files):
        savefiles = []
        imgbatch = []
        completed = 0
        while completed<batch_size and i<len(files):
            # files[i][:-1] strips the trailing newline from each listed name
            filepath = os.path.join(train_data_dir, files[i][:-1])
            imgname = curdir + '/' + files[i][:-1] + '.dsc'