def build_model(classes=2):
    inputs = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    x = preprocess_input(inputs)
    x = ResNet101V2(weights=None, classes=classes)(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def DeepLabV3PlusUNet(input_shape, classes=66, *args, **kwargs):
    input = tf.keras.Input(shape=input_shape)
    x = GaussianNoise(0.1)(input)
    base_model = ResNet101V2(input_tensor=x, include_top=False)
    # base_model.summary()
    skip_connections = [
        base_model.get_layer('conv1_conv').output,         # (None, 128, 128, 64)
        base_model.get_layer('conv2_block2_out').output,   # (None, 64, 64, 256)
        base_model.get_layer('conv3_block3_out').output,   # (None, 32, 32, 512)
        base_model.get_layer('conv4_block22_out').output,  # (None, 16, 16, 1024)
    ]
    image_features = base_model.output  # (None, 8, 8, 2048)

    x_a = ASPP(image_features)          # (None, 8, 8, 2048)
    output = Concatenate()([image_features, x_a])

    for c in (1024, 512, 256, 64):
        a = upsample_by_cnn(output, c)
        b = skip_connections.pop()
        print(a.name, a.shape)
        print(b.name, b.shape)
        output = Concatenate()([a, b])
        # output = Concatenate()([upsample_by_cnn(output, c), skip_connections.pop()])

    x = upsample_by_cnn(output, 32)
    x = Conv2D(classes, (1, 1), name='output_layer')(x)
    x = Activation('softmax', dtype='float32')(x)

    model = Model(inputs=input, outputs=x, name='DeepLabV3_Plus')
    print(f'*** Output_Shape => {model.output_shape} ***')
    return model
def res(lr):
    with strategy.scope():
        conv_base = ResNet101V2(weights='imagenet', include_top=False,
                                input_shape=[*IMAGE_SIZE, 3])
        conv_base.trainable = True
        set_trainable = False
        for layer in conv_base.layers:
            if layer.name == 'conv5_block1_preact_bn':
                set_trainable = True
            if set_trainable:
                layer.trainable = True
            else:
                layer.trainable = False

        input = tf.keras.layers.Input(shape=(512, 512, 3))
        res = conv_base(input)
        gap = tf.keras.layers.GlobalAveragePooling2D()(res)
        dropout = tf.keras.layers.Dropout(0.25)(gap)
        output = tf.keras.layers.Dense(104, activation='softmax')(dropout)
        model = tf.keras.models.Model(input, output)
        model.compile(
            optimizer=tf.keras.optimizers.Adam(lr=lr),
            loss='sparse_categorical_crossentropy',
            metrics=['sparse_categorical_accuracy']
        )
    return model
def __init__(self, model_name=None):
    if model_name == 'Xception':
        base_model = Xception(weights='imagenet')
        self.preprocess_input = xception.preprocess_input
    elif model_name == 'VGG19':
        base_model = VGG19(weights='imagenet')
        self.preprocess_input = vgg19.preprocess_input
    elif model_name == 'ResNet50':
        base_model = ResNet50(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet101':
        base_model = ResNet101(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet152':
        base_model = ResNet152(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet50V2':
        base_model = ResNet50V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'ResNet101V2':
        base_model = ResNet101V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'ResNet152V2':
        base_model = ResNet152V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'InceptionV3':
        base_model = InceptionV3(weights='imagenet')
        self.preprocess_input = inception_v3.preprocess_input
    elif model_name == 'InceptionResNetV2':
        base_model = InceptionResNetV2(weights='imagenet')
        self.preprocess_input = inception_resnet_v2.preprocess_input
    elif model_name == 'DenseNet121':
        base_model = DenseNet121(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'DenseNet169':
        base_model = DenseNet169(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'DenseNet201':
        base_model = DenseNet201(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'NASNetLarge':
        base_model = NASNetLarge(weights='imagenet')
        self.preprocess_input = nasnet.preprocess_input
    elif model_name == 'NASNetMobile':
        base_model = NASNetMobile(weights='imagenet')
        self.preprocess_input = nasnet.preprocess_input
    elif model_name == 'MobileNet':
        base_model = MobileNet(weights='imagenet')
        self.preprocess_input = mobilenet.preprocess_input
    elif model_name == 'MobileNetV2':
        base_model = MobileNetV2(weights='imagenet')
        self.preprocess_input = mobilenet_v2.preprocess_input
    else:
        base_model = VGG16(weights='imagenet')
        self.preprocess_input = vgg16.preprocess_input
    self.model = Model(inputs=base_model.input, outputs=base_model.layers[-2].output)
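# A minimal usage sketch for the feature-extractor class above; the class name
# `FeatureExtractor` and the image file are assumptions for illustration. Because
# the head is cut at the penultimate layer, predict() returns an embedding, and the
# matching preprocess_input for the chosen backbone is applied first.
import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array

extractor = FeatureExtractor(model_name='ResNet101V2')
img = img_to_array(load_img('example.jpg', target_size=(224, 224)))
batch = extractor.preprocess_input(np.expand_dims(img, axis=0))
embedding = extractor.model.predict(batch)  # shape: (1, feature_dim)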
def resnet101V2Model():
    baseModel = ResNet101V2(
        weights=None,
        include_top=False,
        input_shape=(32, 32, 3),
    )
    model_input = Input(shape=(32, 32, 3))
    x = baseModel(model_input)
    x = GlobalAveragePooling2D()(x)
    model_output = Dense(10, activation="softmax")(x)
    model = Model(inputs=model_input, outputs=model_output)
    return model
def transfer_resnet101v2():
    resnet101v2 = ResNet101V2(include_top=False, weights='imagenet', input_shape=(160, 160, 3))
    # ResNet*V2 models expect resnet_v2 preprocessing (scaling to [-1, 1])
    resnet101v2_preprocess = tf.keras.applications.resnet_v2.preprocess_input

    inputs = tf.keras.Input(shape=(160, 160, 3))
    x = resnet101v2_preprocess(inputs)
    # apply the backbone to the preprocessed tensor in inference mode
    x = resnet101v2(x, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    outputs = tf.keras.layers.Dense(units=1, activation='sigmoid')(x)

    custom_resnet101v2 = tf.keras.Model(inputs, outputs)
    custom_resnet101v2.summary()
    return custom_resnet101v2
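# A minimal sketch of training the returned model with the backbone frozen; the
# layer name 'resnet101v2' is the Keras default name for this application model,
# and the dataset objects train_ds / val_ds are assumptions for illustration.
custom_model = transfer_resnet101v2()
custom_model.get_layer('resnet101v2').trainable = False
custom_model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
                     loss='binary_crossentropy',
                     metrics=['accuracy'])
# custom_model.fit(train_ds, validation_data=val_ds, epochs=5)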
def get_encoder_model(name, in_shape, pooling):
    if name == "InceptionV3":
        model = InceptionV3(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet50":
        model = ResNet50(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet50V2":
        model = ResNet50V2(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet101":
        model = ResNet101(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet101V2":
        model = ResNet101V2(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet152":
        model = ResNet152(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "InceptionResNetV2":
        model = InceptionResNetV2(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "DenseNet121":
        model = DenseNet121(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    else:
        raise ValueError("model " + name + " not found")
    return model
def __init__(self, weights_init, model_architecture='vgg16'):
    self.weights_init = weights_init
    if model_architecture == 'vgg16':
        self.model = VGG16(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 5, 9, 13, 17]
    elif model_architecture == 'vgg19':
        self.model = VGG19(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 5, 10, 15, 20]
    elif model_architecture == 'resnet50':
        self.model = ResNet50(weights=self.weights_init, include_top=False)
        self.bridge_list = [4, 38, 80, 142, -1]
    elif model_architecture == 'resnet50v2':
        self.model = ResNet50V2(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 27, 62, 108, -1]
    elif model_architecture == 'resnet101':
        self.model = ResNet101(weights=self.weights_init, include_top=False)
        self.bridge_list = [4, 38, 80, 312, -1]
    elif model_architecture == 'resnet101v2':
        self.model = ResNet101V2(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 27, 62, 328, -1]
    elif model_architecture == 'resnet152':
        self.model = ResNet152(weights=self.weights_init, include_top=False)
        self.bridge_list = [4, 38, 120, 482, -1]
    elif model_architecture == 'resnet152v2':
        self.model = ResNet152V2(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 27, 117, 515, -1]
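# A minimal sketch of how the bridge_list indices above might be consumed: collect
# the listed layer outputs as multi-scale skip connections for a U-Net-style
# decoder. The wrapper name `Backbone` is an assumption for illustration; the
# original class may wire the bridges differently.
backbone = Backbone(weights_init='imagenet', model_architecture='resnet101v2')
bridges = [backbone.model.layers[i].output for i in backbone.bridge_list]
skip_extractor = Model(inputs=backbone.model.input, outputs=bridges)
# skip_extractor(images) yields one feature map per decoder level.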
def construct_model(pretrainedNN):
    model = Sequential()
    if pretrainedNN == 'VGG16':
        model.add(VGG16(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'VGG19':
        model.add(VGG19(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet101':
        model.add(ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet152':
        model.add(ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet50V2':
        model.add(ResNet50V2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet101V2':
        model.add(ResNet101V2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet152V2':
        model.add(ResNet152V2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'MobileNet':
        model.add(MobileNet(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'MobileNetV2':
        model.add(MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'DenseNet121':
        model.add(DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'DenseNet169':
        model.add(DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'DenseNet201':
        model.add(DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3)))
    else:
        model.add(ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3)))
    model.add(Flatten())
    model.add(Dense(77, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model
def DeepLabV3Plus(input_shape, classes=66, *args, **kwargs):
    print('*** Building DeepLabv3Plus Network ***')
    img_height = input_shape[0]
    img_width = input_shape[1]

    # base_model = ResNet50(input_shape=input_shape, weights=None, include_top=False)
    base_model = ResNet101V2(input_shape=input_shape, include_top=False)
    base_model.summary()
    image_features = base_model.output
    # tf.keras.utils.plot_model(base_model, 'ResNet101V2.png')

    x_a = ASPP(image_features)
    x_a = Upsample(tensor=x_a, size=[img_height // 4, img_width // 4])

    # (None, 64, 64, 256)
    x_b = base_model.get_layer('conv2_block2_out').output
    x_b = Conv2D(filters=48, kernel_size=1, padding='same',
                 kernel_initializer='he_normal',
                 name='low_level_projection', use_bias=False)(x_b)
    x_b = BatchNormalization(name='bn_low_level_projection')(x_b)
    x_b = Activation('relu', name='low_level_activation')(x_b)

    x = concatenate([x_a, x_b], name='decoder_concat')
    x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='he_normal', name='decoder_conv2d_1', use_bias=False)(x)
    x = BatchNormalization(name='bn_decoder_1')(x)
    x = Activation('relu', name='activation_decoder_1')(x)
    x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='he_normal', name='decoder_conv2d_2', use_bias=False)(x)
    x = BatchNormalization(name='bn_decoder_2')(x)
    x = Activation('relu', name='activation_decoder_2')(x)
    x = Upsample(x, [img_height, img_width])

    x = Conv2D(classes, (1, 1), name='output_layer')(x)
    # x = Activation('sigmoid')(x)
    x = Activation('softmax', dtype='float32')(x)
    '''
    x = Activation('softmax')(x)
    tf.losses.SparseCategoricalCrossentropy(from_logits=True)
    Args:
        from_logits: Whether `y_pred` is expected to be a logits tensor.
            By default, we assume that `y_pred` encodes a probability distribution.
    '''
    model = Model(inputs=base_model.input, outputs=x, name='DeepLabV3_Plus')
    print(f'*** Output_Shape => {model.output_shape} ***')
    return model
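# The commented docstring above contrasts softmax outputs with logits. A minimal
# compile sketch, assuming integer (sparse) segmentation masks: since the model
# already ends in a softmax activation, the loss is built with from_logits=False;
# dropping the softmax layer would require from_logits=True instead.
seg_model = DeepLabV3Plus((256, 256, 3), classes=66)
seg_model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])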
from tensorflow.keras.layers import Input, Dense, Conv2D, Dropout
from tensorflow.keras.layers import Flatten, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import concatenate, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.utils import plot_model, to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications import ResNet101V2
from tensorflow.keras.models import Sequential

convlayer = ResNet101V2(input_shape=(224, 224, 3), weights='imagenet', include_top=False)

model_resnet = Sequential()
model_resnet.add(convlayer)
model_resnet.add(Dropout(0.5))
model_resnet.add(Flatten())
model_resnet.add(BatchNormalization())
model_resnet.add(Dense(2048, kernel_initializer='he_uniform'))
model_resnet.add(BatchNormalization())
model_resnet.add(Activation('relu'))
model_resnet.add(Dropout(0.5))
model_resnet.add(Dense(1024, kernel_initializer='he_uniform'))
model_resnet.add(BatchNormalization())
model_resnet.add(Activation('relu'))
model_resnet.add(Dropout(0.5))
model_resnet.add(Dense(230, activation='softmax'))
        return shuffle(np.asarray(lfilenames), np.asarray(labels)), np.asarray(lab)
    else:
        return (np.asarray(lfilenames), np.asarray(labels)), np.asarray(lab)

from tensorflow.keras.applications import ResNet101V2

batchsize = 1
cur_root = 'E:\\Project\\08-TF\\TF2\\ZSL_TF2\\CUBfeature\\'
VC_dir = 'E:\\Project\\08-TF\\TF2\\ZSL_TF2\\CUBVCfeature\\'
os.makedirs(cur_root, exist_ok=True)  # create the directory that stores the visual features
dataset_path = "E:\\Project\\08-TF\\TF2\\Caltech-UCSD-Birds-200-2011\\CUB_200_2011\\images\\"

image_model = ResNet101V2(weights='resnet.h5', include_top=False, pooling='avg')
new_input = image_model.input
hidden_layer = image_model.layers[-1].output  # take ResNet's final pooled output as the visual feature
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)

size = [224, 224]

def load_image(image_path):
    img = tf.io.read_file(image_path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, size)
    img = tf.keras.applications.resnet_v2.preprocess_input(img)  # shape is (224, 224, 3)
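# A minimal sketch, assuming load_image returns the preprocessed tensor, of running
# the feature extractor over a handful of files; the image_paths list is an
# assumption for illustration (the original iterates over the CUB class folders).
image_paths = [os.path.join(dataset_path, f) for f in sorted(os.listdir(dataset_path))[:8]]
batches = tf.data.Dataset.from_tensor_slices(image_paths).map(load_image).batch(batchsize)
features = image_features_extract_model.predict(batches)  # pooled 2048-d feature per image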
sess.run(tf.compat.v1.global_variables_initializer())  # initialize variables
try:
    for step in np.arange(1):
        value = sess.run(next_batch_train)
        showimg(step, value[1], np.asarray((value[0] + 1) * 127.5, np.uint8), 10)  # display the images
except tf.errors.OutOfRangeError:  # catch the end-of-dataset exception
    print("Done!!!")

img_size = (224, 224, 3)
inputs = tf.keras.Input(shape=img_size)
image_model = ResNet101V2(
    weights='resnet101v2_weights_tf_dim_ordering_tf_kernels_notop.h5',
    input_tensor=inputs,
    input_shape=img_size,
    include_top=False)

model = tf.keras.models.Sequential()
model.add(image_model)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(150, activation='softmax'))
image_model.trainable = False
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(lr=0.001),
              metrics=['acc'])

# train the model
model_dir = "./models/test"
os.makedirs(model_dir, exist_ok=True)
                        weights='imagenet')),
    ("MobileNet", MobileNet(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("MobileNetV2", MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet101", ResNet101(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet101V2", ResNet101V2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet152", ResNet152(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet152V2", ResNet152V2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet50", ResNet50(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet50V2", ResNet50V2(input_shape=IMG_SHAPE,
def backbone(x_in):
    if backbone_type == 'ResNet50':
        return ResNet50(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'ResNet50V2':
        return ResNet50V2(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'ResNet101V2':
        return ResNet101V2(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'InceptionResNetV2':
        return InceptionResNetV2(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'InceptionV3':
        return InceptionV3(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'MobileNet':
        return MobileNet(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'MobileNetV2':
        return MobileNetV2(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'NASNetLarge':
        model = NASNetLarge(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "nasnet_large_no_top.h5")
        return model(x_in)
    elif backbone_type == 'NASNetMobile':
        model = NASNetMobile(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "nasnet_mobile_no_top.h5")
        return model(x_in)
    elif backbone_type == 'Xception':
        return Xception(input_shape=x_in.shape[1:], include_top=False, weights=weights)(x_in)
    elif backbone_type == 'MobileNetV3Small':
        model = MobileNetV3Small(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "mobilenet_v3_small_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'MobileNetV3Large':
        model = MobileNetV3Large(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "mobilenet_v3_large_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetLite0':
        model = EfficientNetLite0(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnet_lite0_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetLite1':
        model = EfficientNetLite1(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnet_lite1_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetLite2':
        model = EfficientNetLite2(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnet_lite2_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetLite3':
        model = EfficientNetLite3(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnet_lite3_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetLite4':
        model = EfficientNetLite4(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnet_lite4_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetLite5':
        model = EfficientNetLite5(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnet_lite5_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetLite6':
        model = EfficientNetLite6(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnet_lite6_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB0':
        model = EfficientNetB0(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnetb0_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB1':
        model = EfficientNetB1(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnetb1_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB2':
        model = EfficientNetB2(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnetb2_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB3':
        model = EfficientNetB3(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnetb3_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB4':
        model = EfficientNetB4(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnetb4_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB5':
        model = EfficientNetB5(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnetb5_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB6':
        model = EfficientNetB6(input_shape=x_in.shape[1:], include_top=False, weights=None)
        if use_pretrain:
            model.load_weights(WEIGHTS_DIR + "efficientnetb6_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'EfficientNetB7':
        model = EfficientNetB7(input_shape=x_in.shape[1:], include_top=False, weights=None)
        model.load_weights(WEIGHTS_DIR + "efficientnetb7_notop.ckpt")
        return model(x_in)
    elif backbone_type == 'MnasNetA1':
        return MnasNetModel(input_shape=x_in.shape[1:], include_top=False, weights=None, name="MnasNetA1")(x_in)
    elif backbone_type == 'MnasNetB1':
        return MnasNetModel(input_shape=x_in.shape[1:], include_top=False, weights=None, name="MnasNetB1")(x_in)
    elif backbone_type == 'MnasNetSmall':
        return MnasNetModel(input_shape=x_in.shape[1:], include_top=False, weights=None, name="MnasNetSmall")(x_in)
    else:
        raise TypeError('backbone_type error!')
from tensorflow.keras.applications import VGG16, VGG19, Xception
from tensorflow.keras.applications import ResNet101, ResNet101V2, ResNet152, ResNet152V2
from tensorflow.keras.applications import ResNet50, ResNet50V2
from tensorflow.keras.applications import InceptionV3, InceptionResNetV2
from tensorflow.keras.applications import MobileNet, MobileNetV2
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import NASNetLarge, NASNetMobile
from tensorflow.keras.applications import EfficientNetB0, EfficientNetB1

model = ResNet101V2()
model.trainable = True  # make the whole model trainable
model.summary()
print(len(model.weights))
print(len(model.trainable_weights))

# parameter and weight counts for each model
'''
VGG16
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
_________________________________________________________________
32
32

VGG19
Total params: 143,667,240
Trainable params: 143,667,240
Non-trainable params: 0
# vgg16.summary()
print("VGG16", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')

vgg16 = VGG19()
# vgg16.summary()
print("VGG19 number of layers ", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')

vgg16 = Xception()
# vgg16.summary()
print("Xception", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')

vgg16 = ResNet101()
# vgg16.summary()
print("ResNet101", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')

vgg16 = ResNet101V2()
# vgg16.summary()
print("ResNet101V2", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')

vgg16 = ResNet152()
# vgg16.summary()
print("ResNet152", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')

vgg16 = ResNet50()
# vgg16.summary()
print("ResNet50", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')

vgg16 = ResNet50V2()
# vgg16.summary()
print("ResNet50V2", len(vgg16.trainable_weights) / 2)
def build_model(encoder='efficientnetb7', center='dac', full_skip=True, attention='sc', upscore='upall'):
    MODEL_NAME = encoder
    if center is not None:
        MODEL_NAME = MODEL_NAME + '_' + center
    if attention is not None:
        MODEL_NAME = MODEL_NAME + '_' + attention
    if full_skip:
        MODEL_NAME = MODEL_NAME + '_fullskip'
    if upscore is not None:
        MODEL_NAME = MODEL_NAME + '_' + upscore

    if encoder == 'resnet50':
        encoder = ResNet50(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'),
                           weights='imagenet', include_top=False)
        skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out']
        encoder_output = encoder.get_layer('conv5_block3_out').output
        # data              320x320x3
        # conv1_relu        160x160x64
        # conv2_block3_out  80x80x256
        # conv3_block4_out  40x40x512
        # conv4_block6_out  20x20x1024
        # conv5_block3_out  10x10x2048 --> encoder output
    elif encoder == 'resnet101':
        encoder = ResNet101(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'),
                            weights='imagenet', include_top=False)
        skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out']
        encoder_output = encoder.get_layer('conv4_block23_out').output
        # data               320x320x3
        # conv1_relu         160x160x64
        # conv2_block3_out   80x80x256
        # conv3_block4_out   40x40x512
        # conv4_block23_out  20x20x1024 --> encoder output
        # conv5_block3_out   10x10x2048
    elif encoder == 'resnet50v2':
        encoder = ResNet50V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'),
                             weights='imagenet', include_top=False)
        skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block6_1_relu']
        encoder_output = encoder.get_layer('post_relu').output
        # data                 320x320x3
        # conv1_conv           160x160x64
        # conv2_block3_1_relu  80x80x64
        # conv3_block4_1_relu  40x40x128
        # conv4_block6_1_relu  20x20x256
        # post_relu            10x10x2048 --> encoder output
    elif encoder == 'resnet101v2':
        encoder = ResNet101V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'),
                              weights='imagenet', include_top=False)
        skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block23_1_relu']
        encoder_output = encoder.get_layer('post_relu').output
        # data                  320x320x3
        # conv1_conv            160x160x64
        # conv2_block3_1_relu   80x80x64
        # conv3_block4_1_relu   40x40x128
        # conv4_block23_1_relu  20x20x256
        # post_relu             10x10x2048 --> encoder output
    elif encoder == 'vgg19':
        encoder = VGG19(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), weights='imagenet', include_top=False)
        skip_names = ['block1_conv2', 'block2_conv2', 'block3_conv4', 'block4_conv4', 'block5_conv4']
        encoder_output = encoder.get_layer('block5_pool').output
        # block1_conv2  320x320x64
        # block2_conv2  160x160x128
        # block3_conv4  80x80x256
        # block4_conv4  40x40x512
        # block5_conv4  20x20x512
        # block5_pool   10x10x512 --> encoder output
    elif encoder == 'efficientnetb6':
        encoder = EfficientNetB6(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'),
                                 weights='imagenet', include_top=False)
        skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
        encoder_output = encoder.get_layer('block6a_expand_activation').output
        # data                       320x320x3
        # block2a_expand_activation  160x160x192
        # block3a_expand_activation  80x80x240
        # block4a_expand_activation  40x40x432
        # block6a_expand_activation  20x20x1200 --> encoder output
        # top_activation             10x10x2304
    elif encoder == 'efficientnetb7':
        encoder = EfficientNetB7(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'),
                                 weights='imagenet', include_top=False)
        skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
        encoder_output = encoder.get_layer('block6a_expand_activation').output
        # data                       320x320x3
        # block2a_expand_activation  160x160x192
        # block3a_expand_activation  80x80x288
        # block4a_expand_activation  40x40x480
        # block6a_expand_activation  20x20x1344 --> encoder output
        # top_activation             10x10x
    elif encoder == 'mobilenetv2':
        encoder = MobileNetV2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'),
                              weights='imagenet', include_top=False)
        skip_names = ['data', 'block_1_expand_relu', 'block_3_expand_relu', 'block_6_expand_relu', 'block_13_expand_relu']
        encoder_output = encoder.get_layer('out_relu').output
        # data                  320x320x3
        # block_1_expand_relu   160x160x96
        # block_3_expand_relu   80x80x144
        # block_6_expand_relu   40x40x192
        # block_13_expand_relu  20x20x576
        # out_relu              10x10x1248 --> encoder output

    skip_layers = [encoder.get_layer(i).output for i in skip_names]

    # Center --------------
    if center == 'atrous':
        x = atrous_block(encoder_output)
    elif center == 'dac':
        x = dense_atrous_block(encoder_output)
    elif center == 'aspp':
        x = aspp_block(encoder_output)
    elif center is None:
        x = encoder_output

    # Decoder --------------
    if attention == 'se':
        attn_block = se_block
    elif attention == 'cbam':
        attn_block = cbam_block
    elif attention == 'sc':
        attn_block = scSE_block

    filters = [i.shape[-1] for i in skip_layers]
    filters[0] = 64

    scales = [2 ** i for i in range(1, len(filters))][::-1]
    X = []
    for i in range(1, len(filters) + 1):
        X.append(x)

        down = []
        if full_skip:
            for j in range(len(scales) - (i - 1)):
                d = down_skip(skip_layers[j], scales[j + (i - 1)], filters[-1] // 4)
                if attention is not None:
                    d = attn_block(d)
                down.append(d)

        direct = direct_skip(skip_layers[-i], filters[-1] // 4)
        if attention is not None:
            direct = attn_block(direct)

        x = convtranspose_block(x, filters[-1] // 4)
        if attention is not None:
            x = attn_block(x)

        x = Concatenate()([x] + [direct] + down)
        x = conv3_block(x, x.shape[-1])

    if upscore is not None:
        if upscore == 'upall':
            up_scales = [2 ** i for i in range(1, len(filters) + 1)][::-1]
            UP = [upscore_block(x, 32, up_scales[i]) for i, x in enumerate(X)]
            if attention is not None:
                UP = [attn_block(x) for x in UP]
            up = Concatenate()(UP)
        elif upscore == 'upcenter':
            up = upscore_block(X[0], 64, 2 ** len(filters))
            if attention is not None:
                up = attn_block(up)
        x = Concatenate()([x, up])

    x = Conv2D(1, 1, padding='same')(x)
    x = Activation('sigmoid')(x)

    model = Model(encoder.input, x)

    metrics = [dice_coef, Recall(), Precision()]
    opt = Nadam(LR)
    model.compile(loss=bce_dice_loss, optimizer=opt, metrics=metrics)

    return model, MODEL_NAME
def CNN_model(self, learning_rate, epoch, batchsize, whether_Adam, Momentum_gamma, weight_decay, whether_load, cnn_type):
    """
    ResNet-style CNN model.

    :param learning_rate: learning rate
    :param epoch: number of training epochs
    :param batchsize: batch size
    :param whether_Adam: whether to use the Adam optimiser; if not, use SGD with momentum
    :param Momentum_gamma: momentum coefficient
    :param weight_decay: weight decay for the optimiser
    :param whether_load: whether to load a previously trained ResNet model if it exists (otherwise it is overwritten)
    :param cnn_type: which pretrained CNN architecture to use
    """
    test_cnn_mfcc = self.train_mfcc
    test_cnn_label = self.train_label

    if isfile("model/resnet_label.hdf5") and whether_load:
        self.cnn_model = load_model("model/resnet_label.hdf5")
    else:
        train_cnn_mfcc = self.test_mfcc
        train_cnn_label = self.test_label
        val_cnn_mfcc = self.validate_mfcc
        val_cnn_label = self.validate_label

        # input
        input = Input(shape=(self.test_mfcc.shape[1], self.test_mfcc.shape[2], 1))

        # concatenate along the last dimension to get three channels, to fit the input expected by ResNet50
        input_concate = Concatenate()([input, input, input])

        # CNN series network (VGG + ResNet)
        # reference: https://keras.io/api/applications/
        if cnn_type == 'ResNet50':
            from tensorflow.keras.applications import ResNet50
            cnn_output = ResNet50(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet101':
            from tensorflow.keras.applications import ResNet101
            cnn_output = ResNet101(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet152':
            from tensorflow.keras.applications import ResNet152
            cnn_output = ResNet152(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet50V2':
            from tensorflow.keras.applications import ResNet50V2
            cnn_output = ResNet50V2(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet101V2':
            from tensorflow.keras.applications import ResNet101V2
            cnn_output = ResNet101V2(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet152V2':
            from tensorflow.keras.applications import ResNet152V2
            cnn_output = ResNet152V2(pooling='avg')(input_concate)
        elif cnn_type == 'VGG16':
            # width and height should not be smaller than 32
            from tensorflow.keras.applications import VGG16
            cnn_output = VGG16(include_top=False, pooling='avg')(input_concate)
            cnn_output = Flatten()(cnn_output)
        elif cnn_type == 'VGG19':
            # width and height should not be smaller than 32
            from tensorflow.keras.applications import VGG19
            cnn_output = VGG19(include_top=False, pooling='avg')(input_concate)
            cnn_output = Flatten()(cnn_output)
        else:
            # CNN layers we design
            print("No recognised CNN network. The CNN layers we designed are performed")

            # convolution layers
            conv_output1 = Conv2D(filters=32, strides=(1, 1), kernel_size=5, activation='relu')(input)
            # pool_output1 = MaxPool2D(pool_size=(2, 2))(conv_output1)
            conv_output2 = Conv2D(filters=8, strides=(2, 2), kernel_size=4, activation='relu')(conv_output1)
            conv_output2 = Dropout(0.2)(conv_output2)
            conv_output2_batch = BatchNormalization()(conv_output2)

            cnn_output = Flatten()(conv_output2_batch)
            cnn_output = Flatten()(cnn_output)

        # dense with sigmoid
        Dense_sigmoid = Dense(24, activation='sigmoid')(cnn_output)
        Dense_sigmoid = Dropout(0.2)(Dense_sigmoid)

        # dense output
        output = Dense(self.test_label.shape[1], activation='softmax')(Dense_sigmoid)

        # cnn model for label recognition
        self.cnn_model = Model(input, output)

        # optimizer
        if whether_Adam:
            optimizer = optimizers.Adam(lr=learning_rate, beta_1=Momentum_gamma, decay=weight_decay)
        else:
            optimizer = optimizers.SGD(lr=learning_rate, momentum=Momentum_gamma, nesterov=True, decay=weight_decay)

        self.cnn_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['mse', 'accuracy'])

        start = time.time()
        self.history = self.cnn_model.fit(train_cnn_mfcc, train_cnn_label, epochs=epoch, batch_size=batchsize,
                                          validation_data=(val_cnn_mfcc, val_cnn_label))
        self.training_time = time.time() - start
        self.cnn_model.save("model/resnet_label.hdf5")

    # model evaluation
    self.cnn_model.predict(test_cnn_mfcc)
    self.score = self.cnn_model.evaluate(test_cnn_mfcc, test_cnn_label)
    print("test loss: ", self.score[0], ", mse: ", self.score[1], ", accuracy", self.score[2])
    callbacks=[callback],
    validation_data=val_generator,
    workers=4
)

score = model.evaluate(val_generator, verbose=2)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

"""## ResNet101V2 """

from tensorflow.keras.applications import ResNet101V2

net = ResNet101V2(include_top=False, weights='imagenet', input_tensor=Input(shape=(150, 150, 3)))

for layer in net.layers[:-5]:
    layer.trainable = False

x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(1, activation='sigmoid', name='sigmoid')(x)
model = Model(inputs=net.input, outputs=output_layer)

# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

# Train the model using RMSprop
model.compile(loss='binary_crossentropy',
def loadModel(mode, modelWeights, organ, modelType):
    """
    Load a model and compile it.
    Input: training or inference mode, model weights, and the type of model.
    Return: the compiled model.
    """
    # Load model input configuration
    modelInputConfig = loadModelInputConf(organ)
    # Get values
    useChannels = modelInputConfig.useChannels
    useClasses = modelInputConfig.useClasses
    useResolution = modelInputConfig.useResolution

    # Define model
    if modelType == 'ResNet101':
        model = ResNet101(include_top=True, weights=modelWeights,
                          input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses)
    elif modelType == 'SEResNet101':
        mySEResNet = AllSEResNets.SEResNet101
        model = mySEResNet(include_top=True, weights=modelWeights,
                           input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses)
    elif modelType == 'SEResNet154':
        mySEResNet = AllSEResNets.SEResNet154
        model = mySEResNet(include_top=True, weights=modelWeights,
                           input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses)
    # elif modelType == 'SEInceptionResNetV2':
    #     mySEInceptionResNet = AllSEInceptionResNets.SEInceptionResNetV2
    #     model = mySEInceptionResNet(include_top=True, weights=modelWeights,
    #                                 input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses)
    elif modelType == 'EfficientNetB4':
        model = EfficientNetB4(include_top=True, weights=modelWeights,
                               input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses,
                               classifier_activation="softmax")
    elif modelType == 'Xception':
        model = Xception(include_top=True, weights=modelWeights,
                         input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses)
    elif modelType == 'ResNet101V2':
        model = ResNet101V2(include_top=True, weights=modelWeights,
                            input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses,
                            classifier_activation="softmax")
    elif modelType == 'ResNet152V2':
        model = ResNet152V2(include_top=True, weights=modelWeights,
                            input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses,
                            classifier_activation="softmax")
    elif modelType == 'InceptionResNetV2':
        model = InceptionResNetV2(include_top=True, weights=modelWeights,
                                  input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses,
                                  classifier_activation="softmax")
    elif modelType == 'ResNet50V2':
        model = ResNet50V2(include_top=True, weights=modelWeights,
                           input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses,
                           classifier_activation="softmax")
    elif modelType == 'NASNetLarge':
        model = NASNetLarge(include_top=True, weights=modelWeights,
                            input_shape=(useResolution[0], useResolution[1], useChannels), classes=useClasses)
    else:
        raise ValueError('The selected model could not be found')

    if mode == 'training':
        print('Loaded model ' + modelType + ' for training, no weights loaded')
        # Add regularization if needed
        # model = addRegularization(model, tf.keras.regularizers.l2(0.0000))
    if mode == 'inference':
        print('Loaded model ' + modelType + ' for inference, weights loaded.')
        # Do not add regularization

    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        # metrics=['accuracy']
        metrics=[
            'accuracy',
            tf.keras.metrics.Precision(),
            tf.keras.metrics.Recall(),
            tf.keras.metrics.AUC()
        ],
        weighted_metrics=[
            'accuracy',
            tf.keras.metrics.Precision(),
            tf.keras.metrics.Recall(),
            tf.keras.metrics.AUC()
        ])

    return model