def pretrained_r50(input_shape=(64, 64, 3), n_out=100, embed=2048):
    weight_decay = 1e-4
    X_input = Input(input_shape)
    y = Input(shape=(n_out,))

    from tensorflow.keras.applications.resnet50 import ResNet50
    from tensorflow.keras.applications.resnet import ResNet101

    # backbone = ResNet50(include_top=False, weights="imagenet", input_tensor=X_input)
    backbone = ResNet101(include_top=False, weights="imagenet", input_tensor=X_input)
    X = backbone.output

    # for layer in backbone.layers:
    #     layer.trainable = False

    # GeMPoolingLayer and ArcFace are custom layers defined elsewhere in the project.
    X = GeMPoolingLayer(p=3.0)(X)
    X = Flatten()(X)
    X = Dense(embed, kernel_initializer='he_normal',
              kernel_regularizer=regularizers.l2(weight_decay))(X)
    output = ArcFace(n_out, s=30, m=0.3,
                     regularizer=regularizers.l2(weight_decay))([X, y])

    model = Model([X_input, y], output)
    return model
def __init__(self, data_shape=(224, 224, 3), resnet_version=1,
             resnet_layer_number=50, num_classes=1000):
    super(ResNet, self).__init__()

    weights = None
    if num_classes == 1000 and data_shape == (224, 224, 3):
        weights = 'imagenet'

    self.resnet_version = resnet_version

    self.data_augmentation = keras.Sequential(
        [
            layers.experimental.preprocessing.RandomFlip(
                "horizontal", input_shape=data_shape),
            layers.experimental.preprocessing.RandomRotation(0.1),
            layers.experimental.preprocessing.RandomZoom(0.1),
        ]
    )

    self.rescaling = layers.experimental.preprocessing.Rescaling(1. / 255)

    def preprocess_input(x, data_format=None):
        from tensorflow.keras.applications import imagenet_utils
        return imagenet_utils.preprocess_input(
            x, data_format=data_format, mode='tf')
        # return x
    self.preprocess_input = preprocess_input

    if resnet_layer_number == 18:
        if resnet_version == 1:
            self.resnet = ResNet18(category_num=num_classes)
        else:
            self.resnet = ResNet18V2(category_num=num_classes)
    elif resnet_layer_number == 50:
        if resnet_version == 1:
            self.resnet = ResNet50(weights=weights, input_shape=data_shape, classes=num_classes)
        else:
            self.resnet = ResNet50V2(weights=weights, input_shape=data_shape, classes=num_classes)
    elif resnet_layer_number == 101:
        if resnet_version == 1:
            self.resnet = ResNet101(weights=weights, input_shape=data_shape, classes=num_classes)
        else:
            self.resnet = ResNet101V2(weights=weights, input_shape=data_shape, classes=num_classes)
    elif resnet_layer_number == 152:
        if resnet_version == 1:
            self.resnet = ResNet152(weights=weights, input_shape=data_shape, classes=num_classes)
        else:
            self.resnet = ResNet152V2(weights=weights, input_shape=data_shape, classes=num_classes)

    self.build((None,) + data_shape)
def get_model(architecture, iteracion, models_info, pipeline):

    print("=" * len(architecture))
    print(architecture)
    print("=" * len(architecture))

    if iteracion > 0:
        base_model = models_info[architecture]['model_memory']

    if architecture == 'InceptionV3':
        from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
        if iteracion == 0:
            base_model = InceptionV3(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'InceptionV4':
        from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
        if iteracion == 0:
            base_model = InceptionResNetV2(weights=pipeline['weights'], include_top=False,
                                           input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'ResNet50':
        from tensorflow.keras.applications.resnet import ResNet50, preprocess_input
        if iteracion == 0:
            base_model = ResNet50(weights=pipeline['weights'], include_top=False,
                                  input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'ResNet101':
        from tensorflow.keras.applications.resnet import ResNet101, preprocess_input
        if iteracion == 0:
            base_model = ResNet101(weights=pipeline['weights'], include_top=False,
                                   input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'ResNet152':
        from tensorflow.keras.applications.resnet import ResNet152, preprocess_input
        if iteracion == 0:
            base_model = ResNet152(weights=pipeline['weights'], include_top=False,
                                   input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'DenseNet121':
        from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input
        if iteracion == 0:
            base_model = DenseNet121(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'DenseNet169':
        from tensorflow.keras.applications.densenet import DenseNet169, preprocess_input
        if iteracion == 0:
            base_model = DenseNet169(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'DenseNet201':
        from tensorflow.keras.applications.densenet import DenseNet201, preprocess_input
        if iteracion == 0:
            base_model = DenseNet201(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'NASNetLarge':
        from tensorflow.keras.applications.nasnet import NASNetLarge, preprocess_input
        if iteracion == 0:
            base_model = NASNetLarge(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'Xception':
        from tensorflow.keras.applications.xception import Xception, preprocess_input
        if iteracion == 0:
            base_model = Xception(weights=pipeline['weights'], include_top=False,
                                  input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    return base_model, preprocess_input
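# Illustrative call for get_model above: the pipeline dict only needs the keys the
# function actually reads ('weights', 'img_height', 'img_width'); the values here
# are assumptions for the sketch, and models_info can stay empty on iteration 0.
pipeline = {'weights': 'imagenet', 'img_height': 224, 'img_width': 224}
base_model, preprocess_input = get_model('ResNet101', 0, {}, pipeline)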
def img():
    if request.method == 'GET':
        return render_template('deep-learning-img.html', menu=menu,
                               weather=get_weather_main())
    else:
        vgg16 = VGG16()
        vgg19 = VGG19()
        resnet50 = ResNet50()
        resnet101 = ResNet101()
        # inceptionv3 = InceptionV3()
        keras_dict = {
            'vgg16': vgg16, 'vgg19': vgg19,
            'resnet50': resnet50, 'resnet101': resnet101
        }

        f_img = request.files['img']
        file_img = os.path.join(current_app.root_path, 'static/upload/') + 'img.jpg'
        f_img.save(file_img)
        current_app.logger.debug(f"{f_img}, {file_img}")

        img = io.imread(file_img)
        # # unstable
        # # img = cv2.imread(file, -1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img1 = cv2.resize(img, (224, 224))
        # img2 = cv2.resize(img, (299, 299))

        result_list = []
        name_list = []
        for key, value in keras_dict.items():
            yhat = value.predict(img1.reshape(-1, 224, 224, 3))
            label = decode_predictions(yhat)
            label = label[0][0]
            label_per = f'{label[1]}({round(label[2]*100, 1)}%)'
            result_list.append(label_per)
            name_list.append(key)

        mtime = int(os.stat(file_img).st_mtime)
        result_dic = {
            'result': result_list, 'name': name_list,
            'img_name': f_img.filename, 'num': len(name_list)
        }
        return render_template('deep-learnig-img-res.html', menu=menu,
                               weather=get_weather_main(),
                               result_dic=result_dic, mtime=mtime)
def build_resnet101(NUM_CLASS):
    resnet101 = ResNet101(weights='imagenet', include_top=False)
    # model.summary()
    last_layer = resnet101.output
    x = GlobalAveragePooling2D()(last_layer)
    x = Dense(2048)(x)
    x = Dropout(0.5)(x)
    x = Dense(1024)(x)
    x = Dense(512)(x)
    x = Dropout(0.5)(x)
    # a softmax layer for NUM_CLASS classes
    out = Dense(NUM_CLASS, activation='softmax', name='output_layer')(x)
    resnet101 = Model(inputs=resnet101.input, outputs=out)
    plot_model(resnet101, to_file='multiple_inputs.png', show_shapes=True,
               dpi=600, expand_nested=False)
    return resnet101, 7
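# Minimal usage sketch for build_resnet101 above. The optimizer and loss are
# illustrative assumptions, not part of the original snippet; the function also
# returns the constant 7 alongside the model.
model, num_blocks = build_resnet101(NUM_CLASS=2)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])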
def create_Classifier(args):
    '''
    - Create the pre-trained model (InceptionV3 by default, or one of the ResNet variants)
    - Want weights? Include: weights='imagenet'
    '''
    weights = 'imagenet' if args.pretrained else None

    if args.model == "resnet50":
        base_model = ResNet50(weights=weights, include_top=False)
    elif args.model == "resnet101":
        base_model = ResNet101(weights=weights, include_top=False)
    elif args.model == "resnet152":
        base_model = ResNet152(weights=weights, include_top=False)
    else:
        base_model = InceptionV3(weights=weights, include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu',
              kernel_initializer=glorot_uniform(seed=args.seed))(x)
    # and a logistic layer for our two classes
    predictions = Dense(2, activation='softmax',
                        kernel_initializer=glorot_uniform(seed=args.seed))(x)

    # prep model with new layers and compile
    model = Model(inputs=base_model.input, outputs=predictions)
    optimizer = optimizers.Adam(lr=args.learning_rate)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                  metrics=['mse', 'accuracy'])
    return model
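# Illustrative call for create_Classifier above; argparse.Namespace stands in for
# the real CLI args object, and the field values are assumptions (the field names
# match what the function body reads: model, pretrained, seed, learning_rate).
from argparse import Namespace

args = Namespace(model="resnet101", pretrained=True, seed=42, learning_rate=1e-4)
classifier = create_Classifier(args)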
def make_encoder(self, input, name='resnet50', pretrained=True):
    if name == 'resnet18':
        from classification_models.keras import Classifiers
        ResNet18, _ = Classifiers.get('resnet18')
        model = ResNet18(weights='imagenet' if pretrained else None,
                         input_tensor=input, include_top=False)
    elif name == 'resnet50':
        from tensorflow.keras.applications.resnet import ResNet50
        model = ResNet50(weights='imagenet' if pretrained else None,
                         input_tensor=input, include_top=False)
    elif name == 'resnet101':
        from tensorflow.keras.applications.resnet import ResNet101
        model = ResNet101(weights='imagenet' if pretrained else None,
                          input_tensor=input, include_top=False)
    elif name == 'resnet152':
        from tensorflow.keras.applications.resnet import ResNet152
        model = ResNet152(weights='imagenet' if pretrained else None,
                          input_tensor=input, include_top=False)
    elif name == 'vgg16':
        from tensorflow.keras.applications.vgg16 import VGG16
        model = VGG16(weights='imagenet' if pretrained else None,
                      input_tensor=input, include_top=False)
    elif name == 'vgg19':
        from tensorflow.keras.applications.vgg19 import VGG19
        model = VGG19(weights='imagenet' if pretrained else None,
                      input_tensor=input, include_top=False)
    else:
        raise Exception(f'unknown encoder {name}')
    return model
def create_model(name, **kwargs):
    """Create a model by name."""
    assert "input_shape" in kwargs
    assert "num_classes" in kwargs
    input_shape = kwargs["input_shape"]
    num_classes = kwargs["num_classes"]

    if name == "LeNet5":
        model = LeNet5(input_shape=input_shape, num_classes=num_classes)
    elif name == "LeCunLeNet5":
        model = LeCunLeNet5(input_shape=input_shape, num_classes=num_classes)
    elif name.startswith("AttentionLeNet5"):
        from .attention_lenet import AttentionLeNet5
        model = AttentionLeNet5(input_shape=input_shape,
                                num_classes=num_classes,
                                attention="senet")
    elif name == "ResNet18":
        from models.keras_fn.resnet_extension import ResNet18
        model = ResNet18(include_top=True, weights=None,
                         input_shape=input_shape, classes=num_classes)
    elif name == "ResNet34":
        from models.keras_fn.resnet_extension import ResNet34
        model = ResNet34(include_top=True, weights=None,
                         input_shape=input_shape, classes=num_classes)
    elif name == "ResNet50":
        from tensorflow.keras.applications.resnet import ResNet50
        model = ResNet50(include_top=True, weights=None,
                         input_shape=input_shape, classes=num_classes)
    elif name == "ResNet101":
        from tensorflow.keras.applications.resnet import ResNet101
        model = ResNet101(include_top=True, weights=None,
                          input_shape=input_shape, classes=num_classes)
    elif name == "ResNet152":
        from tensorflow.keras.applications.resnet import ResNet152
        model = ResNet152(include_top=True, weights=None,
                          input_shape=input_shape, classes=num_classes)
    elif name == "ResNet20v2":
        # "ResNet20v2", "ResNet56v2"
        # hparams: n, version, input_shape, num_classes
        assert "n" in kwargs
        assert "version" in kwargs
        n = kwargs["n"]
        version = kwargs["version"]
        from .fault_resnet import model_depth, resnet_v2, lr_schedule
        depth = model_depth(n=2, version=2)
        model = resnet_v2(input_shape=input_shape, depth=depth,
                          num_classes=num_classes)
        # TODO
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule(0))
    else:
        raise Exception('Unknown model: ' + name)

    return model
def create_resnetvoc(layer_name, number):
    base_model = ResNet101(weights='imagenet', include_top=False)  # load the ResNet101 backbone
    # for i, layer in enumerate(base_model.layers):
    #     print('Layer', i, layer.name)
    # print(len(model.layers))
    # layer_name = 'conv_dw_12_relu'
    # intermediate_layer_model = Model(inputs=base_model.input,
    #                                  outputs=base_model.get_layer(layer_name).output)

    # add a global spatial average pooling layer
    x = base_model.output
    # x = base_model.get_layer(layer_name).output
    x = GlobalAveragePooling2D()(x)
    # add dense layers so that the model can learn more complex functions and classify better
    x = Dense(1024, activation='relu')(x)  # dense layer 1
    x = Dense(1024, activation='relu')(x)  # dense layer 2
    x = Dense(1024, activation='relu')(x)  # dense layer 3
    x = Dense(512, activation='relu')(x)   # dense layer 4
    # and a logistic layer -- we have 20 VOC classes
    preds = Dense(20, activation='softmax')(x)

    # now a model has been created based on our architecture
    model = Model(inputs=base_model.input, outputs=preds)
    for i, layer in enumerate(model.layers):
        print('Final Model*****', i, layer.name)
    print(len(model.layers))
    # for i, layer in enumerate(base_model.layers):
    #     print('Original Model*****', i, layer.name)

    # first: train only the top layers (which were randomly initialized),
    # i.e. freeze all convolutional backbone layers
    for layer in base_model.layers:
        layer.trainable = False
    # alternatively, freeze only the first N layers and unfreeze the rest:
    # for layer in model.layers[:20]:
    #     layer.trainable = False
    # for layer in model.layers[20:]:
    #     layer.trainable = True

    # SGD optimizer, categorical cross-entropy loss, accuracy as the evaluation metric
    # opt = SGD(learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD')
    model.compile(optimizer=SGD(learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD'),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # set up the dataset generators
    train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)  # included in our dependencies
    train_generator = train_datagen.flow_from_directory(
        '/home/dhaval/piyush/Usecases_dataset/voc_dataset_created/training_data',
        target_size=(224, 224),
        color_mode='rgb',
        batch_size=32,
        class_mode='categorical',
        shuffle=True)
    validation_generator = train_datagen.flow_from_directory(
        '/home/dhaval/piyush/Usecases_dataset/voc_dataset_created/validation_data',
        target_size=(224, 224),
        color_mode='rgb',
        batch_size=32,
        class_mode='categorical',
        shuffle=True)

    step_size_train = train_generator.n // train_generator.batch_size
    step_size_val = validation_generator.n // validation_generator.batch_size
    tensorboard = TensorBoard(log_dir="logs/{}".format(time()),
                              update_freq='epoch', profile_batch=0)

    # fit the model
    model.fit(train_generator,
              steps_per_epoch=step_size_train,
              epochs=50,
              validation_data=validation_generator,
              validation_steps=step_size_val,
              callbacks=[tensorboard])
    # model.fit(train_generator, steps_per_epoch=step_size_train, epochs=12)

    model.save('resnet101_model_voc_20class_ep_50_sgd_layer_' + str(len(model.layers)) + '.h5')
def get_model(architecture, iteracion, models_info, pipeline):

    print("=" * len(architecture))
    print(architecture)
    print("=" * len(architecture))

    if iteracion > 0 and not pipeline["restart_weights"]:
        print("USING MODELS FROM MEMORY")
        base_model = models_info[architecture]['model_memory']
        print("OK - USING MODELS FROM MEMORY")

    if architecture == 'InceptionV3':
        from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = InceptionV3(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
            print(f"OK - RESTARTING WEIGHTS FROM IMAGENET FOR {architecture}")

    if architecture == 'InceptionV4':
        from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = InceptionResNetV2(weights=pipeline['weights'], include_top=False,
                                           input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
            print(f"OK - RESTARTING WEIGHTS FROM IMAGENET FOR {architecture}")

    if architecture == 'ResNet50':
        from tensorflow.keras.applications.resnet import ResNet50, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = ResNet50(weights=pipeline['weights'], include_top=False,
                                  input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'ResNet101':
        from tensorflow.keras.applications.resnet import ResNet101, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = ResNet101(weights=pipeline['weights'], include_top=False,
                                   input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'ResNet152':
        from tensorflow.keras.applications.resnet import ResNet152, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = ResNet152(weights=pipeline['weights'], include_top=False,
                                   input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
            print(f"OK - RESTARTING WEIGHTS FROM IMAGENET FOR {architecture}")

    if architecture == 'DenseNet121':
        from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = DenseNet121(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'DenseNet169':
        from tensorflow.keras.applications.densenet import DenseNet169, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = DenseNet169(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'DenseNet201':
        from tensorflow.keras.applications.densenet import DenseNet201, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = DenseNet201(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'NASNetLarge':
        from tensorflow.keras.applications.nasnet import NASNetLarge, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = NASNetLarge(weights=pipeline['weights'], include_top=False,
                                     input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    if architecture == 'Xception':
        from tensorflow.keras.applications.xception import Xception, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = Xception(weights=pipeline['weights'], include_top=False,
                                  input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    return base_model, preprocess_input
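# Illustrative inputs for this get_model variant: compared with the earlier version
# it also reads pipeline['restart_weights'], which forces a fresh ImageNet backbone
# even when iteracion > 0. The values below are assumptions for the sketch.
pipeline = {'weights': 'imagenet', 'img_height': 224, 'img_width': 224,
            'restart_weights': True}
base_model, preprocess_input = get_model('ResNet101', 1, {}, pipeline)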
def download_for_url(self, path: str, **kwargs):
    """
    Download the file at the given URL

    :param path: the path to download
    :param kwargs: various kwargs for customizing the underlying behavior of
        the model download and setup
    :return: the absolute path to the model
    """
    path_split = path.split('/')
    type = path_split[0]
    weights_file = path_split[1]
    include_top = 'no_top' in weights_file
    if type == 'vgg19':
        ret = VGG19(include_top=include_top, **kwargs)
    elif type == 'vgg16':
        ret = VGG16(include_top=include_top, **kwargs)
    elif type == 'resnet50':
        ret = ResNet50(include_top=include_top, **kwargs)
    elif type == 'resnet101':
        ret = ResNet101(include_top=include_top, **kwargs)
    elif type == 'resnet152':
        ret = ResNet152(include_top=include_top, **kwargs)
    elif type == 'resnet50v2':
        ret = ResNet50V2(include_top=include_top, **kwargs)
    elif type == 'resnet101v2':
        ret = ResNet101V2(include_top=include_top, **kwargs)
    elif type == 'resnet152v2':
        ret = ResNet152V2(include_top=include_top, **kwargs)
    elif type == 'densenet121':
        ret = DenseNet121(include_top=include_top)
    elif type == 'densenet169':
        ret = DenseNet169(include_top=include_top, **kwargs)
    elif type == 'densenet201':
        ret = DenseNet201(include_top=include_top, **kwargs)
    elif type == 'inceptionresnetv2':
        ret = InceptionResNetV2(include_top=include_top, **kwargs)
    elif type == 'efficientnetb0':
        ret = EfficientNetB0(include_top=include_top, **kwargs)
    elif type == 'efficientnetb1':
        ret = EfficientNetB1(include_top=include_top, **kwargs)
    elif type == 'efficientnetb2':
        ret = EfficientNetB2(include_top=include_top, **kwargs)
    elif type == 'efficientnetb3':
        ret = EfficientNetB3(include_top=include_top, **kwargs)
    elif type == 'efficientnetb4':
        ret = EfficientNetB4(include_top=include_top, **kwargs)
    elif type == 'efficientnetb5':
        ret = EfficientNetB5(include_top=include_top, **kwargs)
    elif type == 'efficientnetb6':
        ret = EfficientNetB6(include_top=include_top, **kwargs)
    elif type == 'efficientnetb7':
        ret = EfficientNetB7(include_top=include_top, **kwargs)  # assign to ret so it is saved below
    elif type == 'mobilenet':
        ret = MobileNet(include_top=include_top, **kwargs)
    elif type == 'mobilenetv2':
        ret = MobileNetV2(include_top=include_top)
    # MobileNetV3() missing 2 required positional arguments: 'stack_fn' and 'last_point_ch'
    # elif type == 'mobilenetv3':
    #     ret = MobileNetV3(include_top=include_top, **kwargs)
    elif type == 'inceptionv3':
        ret = InceptionV3(include_top=include_top, **kwargs)
    elif type == 'nasnet':
        ret = NASNetLarge(include_top=include_top, **kwargs)
    elif type == 'nasnet_mobile':
        ret = NASNetMobile(include_top=include_top, **kwargs)
    elif type == 'xception':
        ret = Xception(include_top=include_top, **kwargs)
    model_path = os.path.join(keras_path, weights_file)
    ret.save(model_path)
    return model_path
def main():
    batch_size = 100
    max_length = 15
    word_count_threshold = 1

    base_model = ResNet101(weights='imagenet')
    # base_model.summary()
    print(os.cpu_count())
    print("Num GPUs Available: ",
          len(tf.config.experimental.list_physical_devices('GPU')))
    # exit()

    # change the layer from which output features are to be taken
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('conv5_block3_add').output)

    image_dir = 'image/%2014_resized/'

    # about 80000 images and 400000 captions for the train dataset
    # train_dataset = _process_caption_data(caption_file='data/annotations/captions_train2014.json',
    #                                       image_dir='image/train2014_resized/',
    #                                       max_length=max_length)
    # val_dataset = _process_caption_data(caption_file='data/annotations/captions_val2014.json',
    #                                     image_dir='image/val2014_resized/',
    #                                     max_length=max_length)
    # val_cutoff = int(0.1 * len(val_dataset))
    # test_cutoff = int(0.2 * len(val_dataset))
    # print 'Finished processing caption data'
    # save_pickle(train_dataset, 'data/train/train.annotations.pkl')
    # save_pickle(val_dataset[:val_cutoff], 'data/val/val.annotations.pkl')
    # save_pickle(val_dataset[val_cutoff:test_cutoff].reset_index(drop=True), 'data/test/test.annotations.pkl')

    for split in ['train_hindi_corrected', 'val_hindi', 'test_hindi']:
        annotations = load_pickle('./data/%s/%s.annotations.pkl' % (split, split))

        if split == 'train_hindi':
            word_to_idx = _build_vocab(annotations=annotations, threshold=word_count_threshold)
            save_pickle(word_to_idx, './data/%s/word_to_idx.pkl' % split)

        captions = _build_caption_vector(annotations=annotations,
                                         word_to_idx=word_to_idx,
                                         max_length=max_length)
        save_pickle(captions, './data/%s/%s.captions.pkl' % (split, split))

        file_names, id_to_idx = _build_file_names(annotations)
        save_pickle(file_names, './data/%s/%s.file.names.pkl' % (split, split))

        image_idxs = _build_image_idxs(annotations, id_to_idx)
        save_pickle(image_idxs, './data/%s/%s.image.idxs.pkl' % (split, split))

        image_ids = {}
        feature_to_captions = {}
        i = -1
        for caption, image_id in zip(annotations['caption'], annotations['image_id']):
            if not image_id in image_ids:
                image_ids[image_id] = 0
                i += 1
                feature_to_captions[i] = []
            feature_to_captions[i].append(caption.lower() + ' .')
        save_pickle(feature_to_captions, './data/%s/%s.references.pkl' % (split, split))
        print("Finished building %s caption dataset" % split)
def _createResnetBackbone(self, output_stride=16, depth='101'):
    resnet101 = ResNet101(weights='imagenet',
                          input_shape=(self.dl_input_shape[1], self.dl_input_shape[2], 3),
                          include_top=False)
    assert output_stride in [8, 16], "Only output_stride = 8 or 16 is supported for the resnet backbone."

    resnet101_config = resnet101.get_config()
    resnet101_weights = resnet101.get_weights()
    # tf.keras.backend.clear_session()

    output_stride = 8
    dilatation = 1
    stride_enable = False
    for layer in resnet101_config['layers']:
        if layer['name'] == 'input_1':
            layer['config']['batch_input_shape'] = (None, self.dl_input_shape[-3],
                                                    self.dl_input_shape[-2],
                                                    self.dl_input_shape[-1])
            self.logger.info(layer['name'] + ', ' + str(layer['config']['batch_input_shape']))
        if output_stride == 8 and (layer['name'] == 'conv4_block1_1_conv'
                                   or layer['name'] == 'conv4_block1_0_conv'):
            layer['config']['strides'] = (1, 1)
            self.logger.info(layer['name'] + ', strides=' + str(layer['config']['strides'])
                             + ', ' + str(layer['config']['dilation_rate']))
            stride_enable = True
            if layer['name'] == 'conv4_block1_1_conv':
                dilatation = dilatation * 2  # replace stride with dilation
        elif output_stride in [8, 16]:
            if layer['name'] == 'conv5_block1_1_conv' or layer['name'] == 'conv5_block1_0_conv':
                self.logger.info(layer['name'] + ', ' + str(layer['config']['strides'])
                                 + ', ' + str(layer['config']['dilation_rate']))
                layer['config']['strides'] = (1, 1)
                self.logger.info(layer['name'] + ', ' + str(layer['config']['strides'])
                                 + ', ' + str(layer['config']['dilation_rate']))
                stride_enable = True
                if layer['name'] == 'conv5_block1_1_conv':
                    dilatation = dilatation * 2  # replace stride with dilation
            elif stride_enable and ('_conv' in layer['name']):
                if layer['config']['kernel_size'] != (1, 1):
                    layer['config']['dilation_rate'] = (dilatation, dilatation)
                    self.logger.info(layer['name'] + ', ' + str(layer['config']['strides'])
                                     + ', ' + str(layer['config']['dilation_rate']))
                else:
                    self.logger.info(layer['name'] + ', ' + str(layer['config']['strides'])
                                     + ', ' + str(layer['config']['dilation_rate']))

    self.backbone = Model.from_config(resnet101_config)
    # resize the first conv kernel so the input can carry extra channels (e.g. 4-channel input)
    resnet101_weights[0] = np.resize(resnet101_weights[0],
                                     [resnet101_weights[0].shape[0],
                                      resnet101_weights[0].shape[1],
                                      self.dl_input_shape[-1],
                                      resnet101_weights[0].shape[-1]])
    self.backbone.set_weights(resnet101_weights)

    self.strideOutput32LayerName = 'conv5_block3_out'
    self.strideOutput16LayerName = 'conv4_block23_out'
    self.strideOutput8LayerName = 'conv3_block4_out'
    self.inputLayerName = resnet101.layers[0].name
def get_resnet(classes=54, depth=50, input_shape=(224, 224, 3), base_layer_trainable=False):
    assert depth in [50, 101, 152]
    from tensorflow.keras.applications.resnet import ResNet50, ResNet101, ResNet152

    if depth == 50:
        base_model = ResNet50(include_top=False, input_shape=input_shape)
        for layer in base_model.layers:
            layer.trainable = base_layer_trainable
        head_model = KL.GlobalMaxPool2D()(base_model.output)
        head_model = KL.Dense(1024, activation='relu', name='00',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        # head_model = KL.Dense(1024, activation='relu', name='1111', kernel_initializer='he_uniform')(head_model)
        # head_model = KL.Dropout(0.5)(head_model)
        if classes == 2:
            head_model = KL.Dense(classes, activation='sigmoid', name='3333')(head_model)
        else:
            head_model = KL.Dense(classes, activation='softmax', name='3333')(head_model)
        model = KM.Model(inputs=base_model.input, outputs=head_model)
        return model
    elif depth == 101:
        base_model = ResNet101(include_top=False, input_shape=input_shape)
        for layer in base_model.layers:
            layer.trainable = base_layer_trainable
        head_model = KL.GlobalMaxPool2D()(base_model.output)
        head_model = KL.Dense(1024, activation='relu', name='00',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        # head_model = KL.Dense(1024, activation='relu', name='1111', kernel_initializer='he_uniform')(head_model)
        # head_model = KL.Dropout(0.5)(head_model)
        if classes == 2:
            head_model = KL.Dense(classes, activation='sigmoid', name='3333')(head_model)
        else:
            head_model = KL.Dense(classes, activation='softmax', name='3333')(head_model)
        model = KM.Model(inputs=base_model.input, outputs=head_model)
        return model
    else:
        base_model = ResNet152(include_top=False, input_shape=input_shape)
        for layer in base_model.layers:
            layer.trainable = base_layer_trainable
        head_model = KL.GlobalMaxPool2D()(base_model.output)
        head_model = KL.Dense(1024, activation='relu', name='00',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        head_model = KL.Dense(1024, activation='relu', name='11',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        if classes == 2:
            head_model = KL.Dense(classes, activation='sigmoid', name='3333')(head_model)
        else:
            head_model = KL.Dense(classes, activation='softmax', name='3333')(head_model)
        model = KM.Model(inputs=base_model.input, outputs=head_model)
        return model
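# Quick usage sketch for get_resnet above. It assumes the snippet's KL/KM aliases
# are the usual tensorflow.keras.layers / tensorflow.keras.models imports; the
# compile settings are illustrative, not taken from the original code.
model = get_resnet(classes=54, depth=101, input_shape=(224, 224, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])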
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.applications.resnet import ResNet101  # needed for the backbone used below
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
import random
import os
import tensorflow as tf
from tensorflow.keras import Model

ResNet_101 = ResNet101(input_shape=[256, 256, 3], weights='imagenet', include_top=False)

# freeze the pretrained backbone
for layer in ResNet_101.layers:
    layer.trainable = False

x = Flatten()(ResNet_101.output)
pred = Dense(2, activation='sigmoid')(x)
model = Model(inputs=ResNet_101.input, outputs=pred)
# model.summary()

optimizer = tf.keras.optimizers.Adam(lr=.0001, clipnorm=0.0001)

########################## Image DataGenerator ##########################

model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
import tensorflow as tf
from tensorflow.keras.applications.resnet import ResNet101, ResNet50
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

# model = ResNet50(weights=None)
model = ResNet101()
# model = MobileNet()

# Wrap the Keras model in a concrete function and freeze its variables into constants.
full_model = tf.function(lambda x: model(x))
full_model = full_model.get_concrete_function(
    tf.TensorSpec([None, 224, 224, 3], model.inputs[0].dtype))

frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()

layers = [op.name for op in frozen_func.graph.get_operations()]
print("-" * 50)
print("Frozen model layers: ")
for layer in layers:
    print(layer)
print("-" * 50)
print("Frozen model inputs: ")
print(frozen_func.inputs)   # [<tf.Tensor 'x:0' shape=(None, 224, 224, 3) dtype=float32>]
print("Frozen model outputs: ")
print(frozen_func.outputs)  # [<tf.Tensor 'Identity:0' shape=(None, 1000) dtype=float32>]

tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir="./frozen_models",
                  name="frozen_graph.pb",  # output file name assumed; it is truncated in the source
                  as_text=False)
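# Hedged follow-up sketch: reloading the frozen graph written above. The file name
# "frozen_graph.pb" mirrors the assumed name used in the write_graph call; the
# wrap_frozen_graph helper follows the standard TF2 frozen-graph loading pattern.
import tensorflow as tf


def wrap_frozen_graph(graph_def, inputs, outputs):
    # Import the GraphDef into a wrapped tf.function and prune it to the given I/O tensors.
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")
    wrapped = tf.compat.v1.wrap_function(_imports_graph_def, [])
    return wrapped.prune(
        tf.nest.map_structure(wrapped.graph.as_graph_element, inputs),
        tf.nest.map_structure(wrapped.graph.as_graph_element, outputs))


graph_def = tf.compat.v1.GraphDef()
with open("./frozen_models/frozen_graph.pb", "rb") as f:
    graph_def.ParseFromString(f.read())
frozen_fn = wrap_frozen_graph(graph_def, inputs=["x:0"], outputs=["Identity:0"])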
gen1 = ImageDataGenerator(
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
test_gen = ImageDataGenerator()

train1 = gen1.flow(x_train1)
test1 = test_gen.flow(x_test1)

################################## Feature Extraction ######################################
# Extract features with ResNet101
model1 = ResNet101(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
x_train = model1.predict(x_train)
x_test = model1.predict(x_test)
print(x_train.shape)

# Extract features with InceptionV3
model2 = InceptionV3(weights='imagenet', include_top=False, input_shape=(75, 75, 3))
x_train2 = model2.predict(x_train1)
x_test2 = model2.predict(x_test1)
print(x_train2.shape)

# Merge both predicted feature maps
x_train = np.concatenate((x_train, x_train2), axis=3)
x_test = np.concatenate((x_test, x_test2), axis=3)
    batch_size=batch_size,
    class_mode='categorical',
    target_size=(img_height, img_width)
)

# In[8]:

callbacks = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')
# autosave best Model
best_model_file = '.../resnet101_drop_batch_best_weights_256.h5'
best_model = ModelCheckpoint(best_model_file, monitor='val_acc',
                             verbose=1, save_best_only=True)

# In[9]:

wp = '.../resnet101_weights_tf_dim_ordering_tf_kernels_notop.h5'
resnet101_base = ResNet101(include_top=False, weights=wp,
                           input_tensor=None,
                           input_shape=(img_height, img_width, 3))

# In[10]:

print('Adding new layers...')
output = resnet101_base.get_layer(index=-1).output
output = Flatten()(output)
# let's add a fully-connected layer
output = Dense(512, activation="relu")(output)
output = BatchNormalization()(output)
output = Dropout(0.2)(output)
output = Dense(512, activation="relu")(output)
output = BatchNormalization()(output)
output = Dropout(0.2)(output)
# and a logistic layer -- here we have 5 classes
output = Dense(5, activation='softmax')(output)
def get_resnet101(image_shape):
    from tensorflow.keras.applications.resnet import ResNet101
    model = ResNet101(weights='imagenet', include_top=False, input_shape=image_shape)
    return model
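# Minimal usage sketch for get_resnet101 above; the pooled classification head added
# here is an assumption for illustration (10 classes chosen arbitrarily), not part
# of the original snippet.
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.models import Model

backbone = get_resnet101((224, 224, 3))
x = GlobalAveragePooling2D()(backbone.output)
outputs = Dense(10, activation='softmax')(x)
classifier = Model(inputs=backbone.input, outputs=outputs)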