def _create_architecture(self, data_X, data_y):
    self.model = MobileNet(include_top=False,
                           weights=None,
                           input_tensor=None,
                           input_shape=[int(_) for _ in data_X.shape[-3:]],
                           pooling=None)
    self.model.load_weights('./weights/mobilenet_1_0_224_tf_no_top.h5')
    """ Freeze the previous layers """
    for layer in self.model.layers:
        layer.trainable = False
    """ By setting include_top to False, we need to add our own classification layers """
    # The model documentation notes that this is the size of the classification block
    x = GlobalAveragePooling2D()(self.model.output)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # Dropout is a layer: instantiate it with the rate, then call it on the tensor
    x = Dropout(rate=0.5)(x)
    # and a softmax layer sized to the number of target classes
    x = Dense(int(data_y.shape[1]), activation='softmax', name='predictions')(x)
    # create graph of your new model
    self.model = Model(inputs=self.model.inputs, outputs=x, name='MobileNet')
    self.model.compile(optimizer=tf.train.AdamOptimizer(),
                       loss='categorical_crossentropy',
                       metrics=['accuracy', 'mean_squared_error'])
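# A minimal usage sketch for the architecture above. `MobileNetClassifier` is a
# hypothetical wrapper class assumed to own _create_architecture; the dummy
# arrays stand in for real image batches and one-hot labels.
import numpy as np

data_X = np.zeros((8, 224, 224, 3), dtype=np.float32)  # batch of 224x224 RGB images
data_y = np.zeros((8, 4), dtype=np.float32)            # one-hot labels for 4 classes
data_y[np.arange(8), np.arange(8) % 4] = 1.0

clf = MobileNetClassifier()  # hypothetical wrapper owning _create_architecture
clf._create_architecture(data_X, data_y)
clf.model.fit(data_X, data_y, epochs=1, batch_size=4)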
def backbone(input_shape=(512, 512, 3)):
    image = Input(shape=input_shape, name="image")
    base_model = MobileNet(input_tensor=image, include_top=False, weights="imagenet")
    x = base_model.output
    # Upsample and fuse with earlier, higher-resolution feature maps (FPN-style skips)
    x = _deconv_block(x, 512, kernel_size=3)
    y = base_model.layers[81].output
    x = Add()([x, y])
    x = _deconv_block(x, 256, kernel_size=3)
    y = base_model.layers[39].output
    x = Add()([x, y])
    model = Model(image, x)
    return model, 8
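# _deconv_block is not defined in this snippet. A minimal sketch, assuming it is
# a standard transposed-convolution upsampling block (stride-2 Conv2DTranspose,
# batch norm, ReLU); the exact layer choices are an assumption, not the original
# implementation. For the Add() above to work, the block's output must match the
# skip layer's spatial size and channel count.
from tensorflow.python.keras.layers import Conv2DTranspose, BatchNormalization, Activation

def _deconv_block(x, filters, kernel_size=3):
    # 2x spatial upsampling so the result can be added to an earlier feature map
    x = Conv2DTranspose(filters, kernel_size, strides=2, padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x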
def __init__(self, recent_log=None):
    # Init constants
    # Init classification buffer, designed to smooth the classification
    self.name_to_load = [Config.DATA_DIR_NAMES[0]] * 5
    self.name = 'MobileNetBayesian'
    tf.keras.backend.clear_session()
    self.STANDARD_IMAGE_SIZE = (224, 224, 3)
    model = MobileNet(include_top=False,
                      weights=None,
                      input_tensor=None,
                      input_shape=self.STANDARD_IMAGE_SIZE,
                      pooling=None)
    x = GlobalAveragePooling2D()(model.output)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a softmax layer sized to the number of target classes
    x = Dense(len(Config.DATA_DIR_NAMES), activation='softmax', name='predictions')(x)
    # create graph of your new model
    self.model = Model(inputs=model.inputs, outputs=x, name='MobileNet')
    # print(model.summary())
    weights_path = os.path.join(get_absolute_data_path()[:-5], 'josiah_testing', 'run_logs')
    if recent_log is None:
        # Pick the most recent run log whose name contains this model's name
        recent_log_dir = [
            _ for _ in os.listdir(weights_path)
            if self.name.lower() in str(_).lower()
        ]
        recent_log_dir.sort(reverse=True)
    else:
        recent_log_dir = [recent_log]
    weights_path = os.path.join(weights_path, recent_log_dir[0], 'model.h5')
    print(f'Loading final weight path: {weights_path}')
    # If we want to use weights, then try to load them
    self.model.load_weights(weights_path)
    # Keep a handle to the default graph for thread-safe inference later
    global graph
    graph = tf.get_default_graph()
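# The stored `graph` handle is the usual TF 1.x workaround for running Keras
# inference from a different thread (e.g. a web server request handler). A
# minimal sketch of how a method of this class might use it; the method name
# and the preprocessed-input assumption are illustrative, not original code.
def classify(self, image):
    # `image` is assumed to already be a preprocessed (224, 224, 3) float array
    batch = np.expand_dims(image, axis=0)
    with graph.as_default():
        probabilities = self.model.predict(batch)[0]
    return Config.DATA_DIR_NAMES[int(np.argmax(probabilities))]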
import os
from tensorflow.python.keras.applications import MobileNet
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.python.keras.callbacks import TensorBoard

# MobileNet is designed to work with images of dim 224,224
img_rows, img_cols = 224, 224

MobileNet = MobileNet(weights='imagenet',
                      include_top=False,
                      input_shape=(img_rows, img_cols, 3))

# Layers are trainable (trainable=True) by default; here we explicitly keep
# every layer trainable so the whole backbone is fine-tuned
for layer in MobileNet.layers:
    layer.trainable = True

# Let's print our layers
for (i, layer) in enumerate(MobileNet.layers):
    print(str(i), layer.__class__.__name__, layer.trainable)


def add_top_model_mobile_net(bottom_model, num_class):
    """creates the top or head of the model that will be
    placed on top of the bottom layers"""
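    # The original snippet cuts off after the docstring. What follows is a
    # minimal sketch of a typical classification head (an assumption, not the
    # original author's code): global pooling, a dense hidden layer, and a
    # softmax output sized to num_class.
    top_model = bottom_model.output
    top_model = GlobalAveragePooling2D()(top_model)
    top_model = Dense(1024, activation='relu')(top_model)
    top_model = Dense(num_class, activation='softmax')(top_model)
    return top_model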
def train(hyper_params, reset_dataset=False):
    # Define upper level settings
    global DATASET_LOADED, X, Y
    model_dir = f'./run_logs/{time()}_{"_".join("{!s}.{!r}".format(key, val) for (key, val) in hyper_params.items())}'
    # Get the directory path
    data_dir = get_absolute_data_path()
    target_dict = {name: i for i, name in enumerate(Config.DATA_DIR_NAMES)}
    if reset_dataset or not DATASET_LOADED:
        X, Y = load_images(hyper_params['n_classes'], data_dir, target_dict)
        DATASET_LOADED = True
    if hyper_params['sanity_test']:
        # Sanity test: train and evaluate on the same data
        X_train, X_test, y_train, y_test = (X, X, Y, Y)
    else:
        X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.10)
    print("Done building data set")

    model = None
    x = None
    if hyper_params['name'] == 'VGG19':
        model = VGG19(include_top=False, weights=None, input_tensor=None,
                      input_shape=STANDARD_IMAGE_SIZE, pooling=None)
        # If we want to use weights, then try to load them
        if hyper_params['use_weights']:
            model.load_weights('./weights/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')
    elif hyper_params['name'] == 'InceptionResNetV2':
        model = InceptionResNetV2(include_top=False, weights=None, input_tensor=None,
                                  input_shape=STANDARD_IMAGE_SIZE, pooling=None)
        if hyper_params['use_weights']:
            model.load_weights('./weights/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5')
    elif hyper_params['name'] == 'NASNetMobile':
        model = NASNetMobile(include_top=False, weights=None, input_tensor=None,
                             input_shape=STANDARD_IMAGE_SIZE, pooling=None)
        if hyper_params['use_weights']:
            model.load_weights('./weights/NASNet-mobile-no-top.h5')
    elif hyper_params['name'] == 'MobileNet':
        model = MobileNet(include_top=False, weights=None, input_tensor=None,
                          input_shape=STANDARD_IMAGE_SIZE, pooling=None)
        if hyper_params['use_weights']:
            model.load_weights('./weights/mobilenet_1_0_224_tf_no_top.h5')
    elif hyper_params['name'] == 'MobileNetBayesian':
        model = MobileNet(include_top=False, weights=None, input_tensor=None,
                          input_shape=STANDARD_IMAGE_SIZE, pooling=None)
        if hyper_params['use_weights']:
            model.load_weights('./weights/mobilenet_1_0_224_tf_no_top.h5')

    """ Freeze the previous layers """
    for layer in model.layers:
        layer.trainable = False
    """ By setting include_top to False, we need to add our own classification layers """
    # The model documentation notes that this is the size of the classification block
    x = GlobalAveragePooling2D()(model.output)
    # let's add a small fully-connected layer
    x = Dense(16, activation='relu')(x)
    # and a softmax layer sized to the number of target classes
    x = Dense(hyper_params['n_classes'], activation='softmax', name='predictions')(x)
    # create graph of your new model
    model = Model(inputs=model.inputs, outputs=x, name=hyper_params['name'])

    if hyper_params['opt'] == 'sgd':
        opt = SGD(lr=0.01)
    else:
        opt = 'adam'
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy', 'mean_squared_error'])

    tensorboard = TrainValTensorBoard(log_dir=model_dir,
                                      histogram_freq=0,
                                      X_train=X_train,
                                      X_test=X_test,
                                      y_train=y_train,
                                      y_test=y_test,
                                      write_graph=True,
                                      write_images=False)
    """ Classes are going to be very imbalanced.
    Weight them """
    class_weights = class_weight.compute_class_weight(
        'balanced',
        np.unique([y.argmax() for y in y_train]),
        [y.argmax() for y in y_train])
    # Keras expects class_weight as a dict mapping class index -> weight
    # (assumes every class index appears at least once in y_train)
    class_weights = dict(enumerate(class_weights))

    """ Add image augmentation """
    if hyper_params['use_aug']:
        datagen = tf.keras.preprocessing.image.ImageDataGenerator(
            featurewise_center=True,
            featurewise_std_normalization=True,
            channel_shift_range=0.5,
            rotation_range=180,
            width_shift_range=0.1,
            height_shift_range=0.1,
            brightness_range=[0.5, 1.0],
            horizontal_flip=True,
            vertical_flip=True,
            zoom_range=0.1)
        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)
        # fits the model on batches with real-time data augmentation:
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=32),
                            steps_per_epoch=len(X_train) // 32,
                            epochs=hyper_params['epochs'],
                            validation_data=(X_test, y_test),
                            callbacks=[tensorboard],
                            class_weight=class_weights)
    else:
        model.fit(X_train, y_train,
                  epochs=hyper_params['epochs'],
                  validation_data=(X_test, y_test),
                  callbacks=[tensorboard],
                  class_weight=class_weights)

    # evaluate() returns the loss followed by the compiled metrics (accuracy, MSE)
    print(f'\nEvaluation: {model.evaluate(X_test, y_test)}')
    prediction_y = model.predict(X_test)
    print(f'\nPrediction: {prediction_y}')
    print(f'\nFor Y targets {y_test}')
    # Save entire model to a HDF5 file
    model.save(model_dir + '/model.h5')

    cnf_matrix = confusion_matrix(np.argmax(y_test, axis=1),
                                  np.argmax(prediction_y, axis=1))
    np.set_printoptions(precision=2)
    print(cnf_matrix)
    # plt.figure()
    # plot_confusion_matrix(cnf_matrix, classes=[c for c in target_dict],
    #                       title='Confusion matrix using ' + hyper_params['name'])
    # print(f'Saving confusion matrix to {model_dir + os.sep + "confusion_matrix.jpg"}')
    # plt.savefig(model_dir + os.sep + 'confusion_matrix.jpg')
    #
    # from sklearn.metrics import precision_recall_fscore_support
    # metrics = precision_recall_fscore_support(np.argmax(y_test, axis=1), np.argmax(prediction_y, axis=1),
    #                                           average='weighted')
    # plot_precision_recall_f1(metrics, ['Precision', 'Recall', 'f Score'],
    #                          title='Metrics for ' + hyper_params['name'])
    # print(f'Saving confusion matrix to {model_dir + os.sep + "prec_recall_fscore.jpg"}')
    # plt.savefig(model_dir + os.sep + 'prec_recall_fscore.jpg')
    tf.keras.backend.clear_session()
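# A minimal example of how train() might be invoked, using only the
# hyper_params keys the function above actually reads; the values chosen here
# are illustrative assumptions.
if __name__ == '__main__':
    hyper_params = {
        'name': 'MobileNet',      # one of the architecture names handled above
        'n_classes': len(Config.DATA_DIR_NAMES),
        'sanity_test': False,     # True trains and evaluates on the same data
        'use_weights': True,      # load the no-top ImageNet weights from ./weights
        'opt': 'adam',            # anything other than 'sgd' falls through to adam
        'epochs': 10,
        'use_aug': True,          # enable ImageDataGenerator augmentation
    }
    train(hyper_params, reset_dataset=False)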
plot_confusion_matrix(y, y_pred, classes, normalize=True)
plt.show()


def vgg16(model, path: Path, classes, size):
    vgg16_feature_list_np, y_real = load(path, classes, size)
    vgg16_feature_list_np = model.predict(vgg16_feature_list_np)
    clusterize(vgg16_feature_list_np, y_real, classes)


if __name__ == "__main__":
    # model = ResNet50(weights='imagenet')
    model = MobileNet(weights='imagenet')
    # model = VGG16(weights='imagenet')
    classes = load_classes(base_path.joinpath("train"), count=5)
    vgg16(model, base_path.joinpath("test"), classes, (224, 224))
    x, y = load_my_model(base_path.joinpath("test"), classes)  # , (224, 224), flat=False)
    print(x.shape)
    my_model, features = make_model(x.shape[1:], classes, features=True)
    my_model.fit(x, y, epochs=10)
    y_pred = my_model.predict(x)  # .astype(int)
    print(y_pred)
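# clusterize() is not shown in this snippet; the two dangling lines at its top
# (plot_confusion_matrix and plt.show) look like the tail of it. A minimal
# sketch of one plausible implementation, assuming k-means over the extracted
# feature vectors; this is an illustration, not the original code.
from sklearn.cluster import KMeans

def clusterize(features, y_real, classes):
    # Flatten any spatial dimensions so each sample is a single feature vector
    features = features.reshape(len(features), -1)
    kmeans = KMeans(n_clusters=len(classes), random_state=0).fit(features)
    y_pred = kmeans.labels_
    # Compare cluster assignments against the true labels
    print(np.stack([y_real, y_pred], axis=1))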
""" we set up a sequential model that we can add layers to """ my_new_model = Sequential() """ first we add all of pre-trained model we've written include_top=False, this is how specify that we want to exlude the layer that makes prediction into the thousands of categories used in the ImageNet competition we set the weights to be 'ImageNet' to specify that we use the pre-traind model on ImageNet pooling equals average says that if we had extra channels in our tensor at the end of this step we want to collapse them to 1d tensor by taking an average across channels now we have a pre-trained model that creates the layer before the last layer that we saw in the slides """ my_new_model.add( MobileNet(weights='imagenet', include_top=False, pooling='avg')) """ we add a dense layer to make predictions, we specify the number of nodes in this layer which in this case is the number of classes, then we want to apply the softmax function to turn it into probabilities """ my_new_model.add(Dense( num_classes, activation='softmax', )) """ we tell tensor flow not to train the first layer which is the pre-trained model because that's the model that was already pre-trained with the ImageNet data """ my_new_model.layers[0].trainable = False