def resnet50(): """ resnet50的基础特征提取网络 :return: """ bn_axis = 3 input_tensor = Input(shape=(224, 224, 3)) # resnet50基础网络部分 x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(input_tensor) x = layers.Conv2D(64, (7, 7), strides=(2, 2), padding='valid', kernel_initializer='he_normal', name='conv1')(x) x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x) x = layers.Activation('relu')(x) x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') ''' # 选定trainable的层,默认全部训练 base_model = Model(inputs=img_input, outputs=x) for layer in base_model.layers: if isinstance(layer, BatchNormalization): layer.trainable = True else: layer.trainable = False ''' return input_tensor, x
def ResNet50_lr(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
    """Instantiates the ResNet50 architecture.

    Optionally loads weights pre-trained on ImageNet.
    Note that when using TensorFlow, for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both TensorFlow and Theano.
    The data format convention used by the model is the one specified in your
    Keras config file.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 197.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=64,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    # x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='a268eb855778b3df3c7506639542a6af')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                # Note: the `avg_pool` layer is commented out above, so this
                # Theano conversion branch would raise when looking it up; it is
                # kept only for parity with the reference keras.applications code.
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(dense, shape,
                                                              'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model
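# Illustrative usage sketch (not part of the original source): build the model
# above as a frozen feature extractor with global average pooling. It assumes
# the module-level Keras imports used by `ResNet50_lr`; the name
# `feature_extractor_demo` and the choice of `weights=None` (to avoid a
# download in this sketch) are examples only.
def feature_extractor_demo():
    feature_model = ResNet50_lr(include_top=False,
                                weights=None,
                                input_shape=(224, 224, 3),
                                pooling='avg')
    for layer in feature_model.layers:
        layer.trainable = False
    return feature_model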
def ResNet50(self):
    classes = self.num_classes
    img_input = Input(shape=self.input_shape)

    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    x = layers.Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1')(x)
    x = layers.BatchNormalization(axis=3, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = layers.AveragePooling2D((7, 7), name='avg_pool')(x)
    x = layers.Flatten()(x)
    x = layers.Dense(classes, activation='softmax', name='fc1000')(x)

    model = models.Model(img_input, x, name='resnet50')
    return model
def KerasModelResNet(self, imgInput):
    """Construct a slimmed-down ResNet feature extractor.

    The expected input image size is 150x150.
    """
    bn_axis = 3

    x = ZeroPadding2D((3, 3))(imgInput)
    x = Convolution2D(8, 7, strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [8, 8, 16], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [8, 8, 16], stage=2, block='b')
    x = identity_block(x, 3, [8, 8, 16], stage=2, block='c')

    x = conv_block(x, 3, [16, 16, 32], stage=3, block='a')
    x = identity_block(x, 3, [16, 16, 32], stage=3, block='b')
    x = identity_block(x, 3, [16, 16, 32], stage=3, block='c')
    x = identity_block(x, 3, [16, 16, 32], stage=3, block='d')

    x = conv_block(x, 3, [32, 32, 64], stage=4, block='a')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='b')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='c')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='d')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='e')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='f')

    x = conv_block(x, 3, [64, 64, 128], stage=5, block='a')
    x = identity_block(x, 3, [64, 64, 128], stage=5, block='b')
    x = identity_block(x, 3, [64, 64, 128], stage=5, block='c')

    x = conv_block(x, 3, [64, 64, 256], stage=6, block='a')
    x = identity_block(x, 3, [64, 64, 256], stage=6, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=6, block='c')

    x = GlobalAveragePooling2D()(x)
    # x = Flatten()(x)
    x = Dense(self.featureDim,
              kernel_regularizer=regularizers.l2(0.0002),
              activity_regularizer=regularizers.l1(0.0002),
              name='fc_feature')(x)
    x = PReLU()(x)
    return x
def resnet50(inputs):
    # Determine proper input shape
    bn_axis = 3

    # x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(inputs)
    x = layers.Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv1')(inputs)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 64], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 64], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 64], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 128], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 128], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 128], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 128], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 256], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 256], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 256], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 256], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 256], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 256], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 512], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 512], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 512], stage=5, block='c')

    # # Select which layers to fine-tune (keep BatchNormalization layers
    # # trainable, freeze everything else).
    # no_train_model = Model(inputs=img_input, outputs=x)
    # for l in no_train_model.layers:
    #     if isinstance(l, layers.BatchNormalization):
    #         l.trainable = True
    #     else:
    #         l.trainable = False
    # model = Model(input, x, name='resnet50')

    x = layers.GlobalAveragePooling2D()(x)
    # # Optionally add a fully-connected layer to reduce the feature dimension
    # x = layers.Dense(units=512)(x)
    return x
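# Illustrative usage sketch (not part of the original source): wrap the
# `resnet50(inputs)` variant directly above in a standalone Model. It assumes
# `layers`, `Model`, `conv_block` and `identity_block` are available in this
# module; the 32x32 input size and the function name are examples only.
def resnet50_feature_model_demo(input_shape=(32, 32, 3)):
    inputs = layers.Input(shape=input_shape)
    features = resnet50(inputs)  # 1D feature vector after global average pooling
    return Model(inputs, features, name='resnet50_features_demo')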
def resnet50_unet_sigmoid(input_shape=(IMG_H, IMG_W, IMG_C), weights='imagenet'):
    inp = Input(input_shape)

    # Encoder: ResNet50 backbone
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(inp)
    x = layers.Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1')(x)
    x = layers.BatchNormalization(axis=BN_AXIS, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    c1 = x
    # print("c1")
    # print(c1.shape)

    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    c2 = x
    # print("c2")
    # print(c2.shape)

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    c3 = x
    # print("c3")
    # print(c3.shape)

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    c4 = x
    # print("c4")
    # print(c4.shape)

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    c5 = x
    # print("c5")
    # print(c5.shape)

    # Decoder: upsample and concatenate with the encoder skip connections
    u6 = conv_block_custom(UpSampling2D()(c5), 1024)
    # print("u6")
    # print(u6.shape)
    u6 = concatenate([u6, c4], axis=-1)
    u6 = conv_block_custom(u6, 1024)

    u7 = conv_block_custom(UpSampling2D()(u6), 512)
    # print("u7")
    # print(u7.shape)
    u7 = concatenate([u7, c3], axis=-1)
    u7 = conv_block_custom(u7, 512)

    u8 = conv_block_custom(UpSampling2D()(u7), 256)
    # print("u8")
    # print(u8.shape)
    u8 = concatenate([u8, c2], axis=-1)
    u8 = conv_block_custom(u8, 256)

    u9 = conv_block_custom(UpSampling2D()(u8), 64)
    # print("u9")
    # print(u9.shape)
    u9 = concatenate([u9, c1], axis=-1)
    u9 = conv_block_custom(u9, 64)

    u10 = conv_block_custom(UpSampling2D()(u9), 32)
    u10 = conv_block_custom(u10, 32)

    res = Conv2D(2, (1, 1), activation='sigmoid')(u10)
    model = Model(inp, res)

    if weights == "imagenet":
        resnet50 = ResNet50(weights=weights,
                            include_top=False,
                            input_shape=(input_shape[0], input_shape[1], 3))
        # resnet50.summary()
        print("Loading imagenet weights ...")
        for i in tqdm(range(3, len(resnet50.layers) - 2)):
            try:
                model.layers[i].set_weights(resnet50.layers[i].get_weights())
                model.layers[i].trainable = False
            except Exception:
                print(resnet50.layers[i].name)
                exit()
        print("imagenet weights have been loaded.")
        del resnet50

    return model
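# Illustrative usage sketch (not part of the original source): instantiate the
# ResNet50 U-Net above for 224x224 RGB inputs and compile it for its
# two-channel sigmoid mask output. `weights=None` avoids the ImageNet download
# in this sketch; the optimizer and loss are example choices only.
def unet_demo():
    seg_model = resnet50_unet_sigmoid(input_shape=(224, 224, 3), weights=None)
    seg_model.compile(optimizer='adam', loss='binary_crossentropy')
    return seg_model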
def custom_resnet(n=0, dp_rate=0):
    WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

    # Determine proper input shape
    # input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=197,
    #                                   data_format=K.image_data_format(), include_top=include_top)
    img_input = Input(shape=(224, 224, 3))

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    x = Dropout(dp_rate)(x)

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    x = Dropout(dp_rate)(x)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)
    x = Flatten()(x)
    x = Dense(25, activation='softmax', name='fc1000')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    inputs = img_input

    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights (no-top ImageNet weights, matched by layer name)
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                            WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models',
                            md5_hash='a268eb855778b3df3c7506639542a6af')
    model.load_weights(weights_path, by_name=True)

    # Freeze everything except the last `n` layers.
    split_value = len(model.layers) + 1 - n
    for layer in model.layers[:split_value]:
        layer.trainable = False
    for layer in model.layers[split_value:]:
        layer.trainable = True
    return model
def resnet_dropout(include_top=False,
                   weights='imagenet',
                   input_tensor=None,
                   pooling='avg',
                   input_shape=(224, 224, 3),
                   classes=25,
                   dp_rate=0.,
                   n_retrain_layers=0):
    WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    # input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=197,
    #                                   data_format=K.image_data_format(), include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = Dropout(dp_rate)(x)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH,
                                    cache_subdir='models',
                                    md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
            weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    WEIGHTS_PATH_NO_TOP,
                                    cache_subdir='models',
                                    md5_hash='a268eb855778b3df3c7506639542a6af')
        model.load_weights(weights_path)

    # Freeze all layers except the last `n_retrain_layers`.
    split_value = len(model.layers) + 1 - n_retrain_layers
    for layer in model.layers[:split_value]:
        layer.trainable = False
    for layer in model.layers[split_value:]:
        layer.trainable = True
    return model
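# Illustrative usage sketch (not part of the original source): use the dropout
# variant above as a pooled feature extractor and attach a fresh softmax head
# for a 25-class problem. `weights=None` avoids the ImageNet download in this
# sketch; the head, optimizer, and hyperparameters are examples only.
def resnet_dropout_demo(num_classes=25):
    base = resnet_dropout(include_top=False, weights=None, pooling='avg',
                          dp_rate=0.3, n_retrain_layers=10)
    out = Dense(num_classes, activation='softmax', name='demo_head')(base.output)
    clf = Model(base.input, out, name='resnet_dropout_demo')
    clf.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return clf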