def create_resnet152C(optimizer, loss='binary_crossentropy', metrics=['accuracy']):
    base_model = ResNet152(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
    # Unfreeze the selected layers of the last ResNet block
    unfreeze = [
        'conv5_block3_1_conv', 'conv5_block3_1_bn',
        'conv5_block3_2_conv', 'conv5_block3_2_bn',
        'conv5_block3_3_conv', 'conv5_block3_3_bn'
    ]
    for layer in base_model.layers:
        if layer.name in unfreeze:
            layer.trainable = True   # unfreeze
        else:
            layer.trainable = False  # freeze the remaining weights
    # for layer in base_model.layers[-2:]:
    #     layer.trainable = True
    model = create_modelC(base_model)
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return model
def resnet152_fpn(input_shape, channels=1, activation="softmax"):
    # img_input = Input(input_shape)
    resnet_base = ResNet152(input_shape=input_shape, include_top=False)
    # resnet_base.load_weights(download_resnet_imagenet("resnet152"))
    conv1 = resnet_base.get_layer("conv1_relu").output
    conv2 = resnet_base.get_layer("res2c_relu").output
    conv3 = resnet_base.get_layer("res3b7_relu").output
    conv4 = resnet_base.get_layer("res4b35_relu").output
    conv5 = resnet_base.get_layer("res5c_relu").output
    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate([
        prediction_fpn_block(P5, "P5", (8, 8)),
        prediction_fpn_block(P4, "P4", (4, 4)),
        prediction_fpn_block(P3, "P3", (2, 2)),
        prediction_fpn_block(P2, "P2"),
    ])
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    x = Conv2D(channels, (1, 1), name="mask", kernel_initializer="he_normal")(x)
    x = Activation(activation)(x)
    # Build on the backbone's own input tensor so the graph stays connected
    model = Model(resnet_base.input, x)
    return model
def save_frame_to_binary(frame_dict: dict, save_path: str, video_img_dict: dict):
    print("saving frame into txt .....")
    model = ResNet152(weights='imagenet', pooling="avg")
    res_txt_file = os.path.join(save_path, 'id.feature.txt')
    video2frame_txt_file = os.path.join(save_path, 'video2frame.txt')
    # -----------Save frame features-------------
    with open(res_txt_file, 'w') as f:
        for key in frame_dict:
            frame = frame_dict[key]
            f.write(str(key) + " ")
            feature = extract_feature(frame, model)
            for feature_elem in feature:
                for feature_item in feature_elem:
                    f.write(str(feature_item) + " ")
            f.write("\n")
    print("txt file saved!")
    # -----------Save video2frame-------------
    with open(video2frame_txt_file, 'w') as f:
        f.write(str(video_img_dict))
    print("video2frame file saved!")
def predict(img):
    model = ResNet152()
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = resnet.preprocess_input(x)
    predictions = model.predict(x)
    all_cat_pred = predictions[0]
    predicted_classes = resnet.decode_predictions(predictions, top=9)
    return predictions[0]
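# --- Hedged usage sketch for predict() above; not part of the original snippet. ---
# The imports provide the module-level names the function expects; 'example.jpg' is a
# hypothetical path and 224x224 matches the default ResNet152 input size.
import numpy as np
from tensorflow.keras.applications import ResNet152
from tensorflow.keras.applications import resnet
from tensorflow.keras.preprocessing import image

img = image.load_img('example.jpg', target_size=(224, 224))  # hypothetical image file
probs = predict(img)                                          # 1000-way ImageNet probabilities
print(resnet.decode_predictions(np.expand_dims(probs, axis=0), top=3))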
def __init__(self, model_name=None):
    if model_name == 'Xception':
        base_model = Xception(weights='imagenet')
        self.preprocess_input = xception.preprocess_input
    elif model_name == 'VGG19':
        base_model = VGG19(weights='imagenet')
        self.preprocess_input = vgg19.preprocess_input
    elif model_name == 'ResNet50':
        base_model = ResNet50(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet101':
        base_model = ResNet101(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet152':
        base_model = ResNet152(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet50V2':
        base_model = ResNet50V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'ResNet101V2':
        base_model = ResNet101V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'ResNet152V2':
        base_model = ResNet152V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'InceptionV3':
        base_model = InceptionV3(weights='imagenet')
        self.preprocess_input = inception_v3.preprocess_input
    elif model_name == 'InceptionResNetV2':
        base_model = InceptionResNetV2(weights='imagenet')
        self.preprocess_input = inception_resnet_v2.preprocess_input
    elif model_name == 'DenseNet121':
        base_model = DenseNet121(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'DenseNet169':
        base_model = DenseNet169(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'DenseNet201':
        base_model = DenseNet201(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'NASNetLarge':
        base_model = NASNetLarge(weights='imagenet')
        self.preprocess_input = nasnet.preprocess_input
    elif model_name == 'NASNetMobile':
        base_model = NASNetMobile(weights='imagenet')
        self.preprocess_input = nasnet.preprocess_input
    elif model_name == 'MobileNet':
        base_model = MobileNet(weights='imagenet')
        self.preprocess_input = mobilenet.preprocess_input
    elif model_name == 'MobileNetV2':
        base_model = MobileNetV2(weights='imagenet')
        self.preprocess_input = mobilenet_v2.preprocess_input
    else:
        base_model = VGG16(weights='imagenet')
        self.preprocess_input = vgg16.preprocess_input
    # Expose the penultimate layer as the feature output
    self.model = Model(inputs=base_model.input, outputs=base_model.layers[-2].output)
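# --- Hedged usage sketch; not from the original code. Assumes the enclosing class is
# named FeatureExtractor (hypothetical name) and exposes self.model and
# self.preprocess_input exactly as set up in __init__ above.
import numpy as np
from tensorflow.keras.preprocessing import image

extractor = FeatureExtractor(model_name='ResNet152')          # hypothetical class name
img = image.load_img('example.jpg', target_size=(224, 224))   # hypothetical image file
x = extractor.preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = extractor.model.predict(x)                         # penultimate-layer embedding
print(features.shape)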
def create_resnet152(optimizer, loss='binary_crossentropy', metrics=['accuracy']):
    # Build the convolutional base
    base_model = ResNet152(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
    model = create_modelA(base_model)
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return model
def transfer_resnet152():
    resnet152 = ResNet152(include_top=False, weights='imagenet', input_shape=(160, 160, 3))
    # ResNet50 and ResNet152 share the same (caffe-style) preprocessing function
    resnet152_preprocess = tf.keras.applications.resnet50.preprocess_input
    inputs = tf.keras.Input(shape=(160, 160, 3))
    x = resnet152_preprocess(inputs)
    x = resnet152(x, training=False)  # feed the preprocessed tensor, not the raw inputs
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    outputs = tf.keras.layers.Dense(units=1, activation='sigmoid')(x)
    custom_resnet152 = tf.keras.Model(inputs, outputs)
    custom_resnet152.summary()
    return custom_resnet152
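# --- Hedged usage sketch for transfer_resnet152(); not part of the original snippet. ---
# The optimizer, learning rate, and the train_ds/val_ds tf.data pipelines are assumptions;
# the single sigmoid unit above implies a binary_crossentropy loss on 160x160 RGB inputs.
import tensorflow as tf

model = transfer_resnet152()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# model.fit(train_ds, validation_data=val_ds, epochs=5)  # hypothetical datasets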
def backbone(x):
    if backbone_type == 'ResNet50':
        extractor = ResNet50(input_shape=x.shape[1:], include_top=False, weights=weights)
        pick_layer1 = 80   # [80, 80, 512]
        pick_layer2 = 142  # [40, 40, 1024]
        pick_layer3 = 174  # [20, 20, 2048]
        pick_layer4 = []   # no 4th pick layer; only valid when levels != '5'
        preprocess = tf.keras.applications.resnet.preprocess_input
    elif backbone_type == 'ResNet152':
        extractor = ResNet152(input_shape=x.shape[1:], include_top=False, weights=weights)
        pick_layer1 = 38   # [160, 160, 256]
        pick_layer2 = 120  # [80, 80, 512]
        pick_layer3 = 482  # [40, 40, 1024]
        pick_layer4 = 514  # [20, 20, 2048]
        preprocess = tf.keras.applications.resnet.preprocess_input
    elif backbone_type == 'MobileNetV2':
        extractor = MobileNetV2(input_shape=x.shape[1:], include_top=False, weights=weights)
        pick_layer1 = 54   # [80, 80, 32]
        pick_layer2 = 116  # [40, 40, 96]
        pick_layer3 = 143  # [20, 20, 160]
        pick_layer4 = []   # no 4th pick layer; only valid when levels != '5'
        preprocess = tf.keras.applications.mobilenet_v2.preprocess_input
    else:
        raise NotImplementedError(
            'Backbone type {} is not recognized.'.format(backbone_type))

    if levels == '5':
        model = Model(extractor.input,
                      (extractor.layers[pick_layer1].output,
                       extractor.layers[pick_layer2].output,
                       extractor.layers[pick_layer3].output,
                       extractor.layers[pick_layer4].output),
                      name=backbone_type + '_extractor')(preprocess(x))
    else:
        model = Model(extractor.input,
                      (extractor.layers[pick_layer1].output,
                       extractor.layers[pick_layer2].output,
                       extractor.layers[pick_layer3].output),
                      name=backbone_type + '_extractor')(preprocess(x))
    return model
def get_encoder_model(name, in_shape, pooling):
    if name == "InceptionV3":
        model = InceptionV3(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet50":
        model = ResNet50(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet50V2":
        model = ResNet50V2(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet101":
        model = ResNet101(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet101V2":
        model = ResNet101V2(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "ResNet152":
        model = ResNet152(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "InceptionResNetV2":
        model = InceptionResNetV2(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    elif name == "DenseNet121":
        model = DenseNet121(include_top=False, input_shape=in_shape, weights=None, pooling=pooling)
    else:
        raise ValueError("model " + name + " not found")
    return model
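# --- Hedged usage sketch for get_encoder_model(); not part of the original snippet. ---
# The input shape and pooling choice are example values; weights stay random (weights=None)
# exactly as the factory above hard-codes them.
encoder = get_encoder_model("ResNet152", in_shape=(224, 224, 3), pooling="avg")
print(encoder.output_shape)  # (None, 2048) with global average pooling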
def create_model(self):
    base_model = ResNet152(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH, 4))
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    # x = tf.keras.layers.GlobalMaxPooling2D()(x)
    predictions = tf.keras.layers.Dense(3, activation="relu")(x)
    model = tf.keras.Model(inputs=base_model.input, outputs=predictions)
    adamax_optim = tf.keras.optimizers.Adamax(learning_rate=0.001)
    model.compile(
        loss="mse",  # Mean Squared Error
        optimizer=adamax_optim,
        metrics=["accuracy"])
    return model
def keras_resnet152(classes):
    # create the base pre-trained model
    base_model = ResNet152(weights='imagenet', include_top=False)
    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer with `classes` outputs
    predictions = Dense(classes, activation='softmax')(x)
    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    # first: train only the top layers (which were randomly initialized),
    # i.e. freeze all convolutional ResNet layers
    # for layer in base_model.layers:
    #     layer.trainable = False
    model.summary()
    return model
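# --- Hedged usage sketch; not part of the original snippet. Compiles the model above for
# a hypothetical 10-class problem; uncommenting the freeze loop in keras_resnet152 would
# restrict training to the new head, as the comments there suggest.
from tensorflow.keras.optimizers import Adam

model = keras_resnet152(10)
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])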
def get_model(weights, length, shape):
    if weights is None:
        print('Using Random weights')
    else:
        print('Using ' + str(weights) + ' weights')
    assert length in [50, 101, 152]
    if length == 50:
        from tensorflow.keras.applications import ResNet50
        model = ResNet50(include_top=False, weights=weights, input_shape=shape)
    elif length == 101:
        from tensorflow.keras.applications import ResNet101
        model = ResNet101(include_top=False, weights=weights, input_shape=shape)
    elif length == 152:
        from tensorflow.keras.applications import ResNet152
        model = ResNet152(include_top=False, weights=weights, input_shape=shape)
    return model
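# --- Hedged usage sketch for get_model(); not part of the original snippet. Builds the
# 152-layer backbone with ImageNet weights; the input shape is an example value.
backbone = get_model(weights='imagenet', length=152, shape=(224, 224, 3))
print(backbone.output_shape)  # (None, 7, 7, 2048) for a 224x224 input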
def __init__(self, weights_init, model_architecture='vgg16'):
    self.weights_init = weights_init
    if model_architecture == 'vgg16':
        self.model = VGG16(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 5, 9, 13, 17]
    elif model_architecture == 'vgg19':
        self.model = VGG19(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 5, 10, 15, 20]
    elif model_architecture == 'resnet50':
        self.model = ResNet50(weights=self.weights_init, include_top=False)
        self.bridge_list = [4, 38, 80, 142, -1]
    elif model_architecture == 'resnet50v2':
        self.model = ResNet50V2(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 27, 62, 108, -1]
    elif model_architecture == 'resnet101':
        self.model = ResNet101(weights=self.weights_init, include_top=False)
        self.bridge_list = [4, 38, 80, 312, -1]
    elif model_architecture == 'resnet101v2':
        self.model = ResNet101V2(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 27, 62, 328, -1]
    elif model_architecture == 'resnet152':
        self.model = ResNet152(weights=self.weights_init, include_top=False)
        self.bridge_list = [4, 38, 120, 482, -1]
    elif model_architecture == 'resnet152v2':
        self.model = ResNet152V2(weights=self.weights_init, include_top=False)
        self.bridge_list = [2, 27, 117, 515, -1]
    # define subplot
    pyplot.subplot(330 + 1 + i)
    # plot raw pixel data
    pyplot.imshow(X_train[i])
# show the figure
pyplot.show()

print("Labels: ")
print(y_train[:3].flatten())
print(y_train[3:6].flatten())
print(y_train[6:9].flatten())
print(f"Image size: {X_train.shape}")

# %%
# import model
base_resnet = ResNet152(weights='imagenet', include_top=False, input_shape=(32, 32, 3))

# the resnet layers should be frozen, i.e. not trained
for layer in base_resnet.layers:
    layer.trainable = False

# Functional model API
type(base_resnet)

# Add the head, i.e. the final classification layers
x = layers.Flatten()(base_resnet.output)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
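# --- Hedged continuation; not from the original code. One way the head above might be
# closed into a trainable model: the 10-way softmax and the optimizer are assumptions.
from tensorflow.keras.models import Model

outputs = layers.Dense(10, activation='softmax')(x)  # hypothetical class count
model = Model(inputs=base_resnet.input, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()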
    input_shape_img = (224, 224, 3)
    img_input = Input(shape=input_shape_img)
    base_layers = ResNet50(weights='imagenet', include_top=False, input_tensor=img_input)
    C.network = 'resnet50'
elif options.network == 'resnet152':
    from src.architectures import resnet152 as nn
    # define input of our network
    input_shape_img = (224, 224, 3)
    img_input = Input(shape=input_shape_img)
    base_layers = ResNet152(weights='imagenet', include_top=False, input_tensor=img_input)
    C.network = 'resnet152'
elif options.network == 'efficientnet-b0':
    from src.architectures import efficientnet as nn
    # define input of our network
    input_shape_img = (224, 224, 3)
    img_input = Input(shape=input_shape_img)
    base_layers = nn.nn_base(1.0, 1.0, input_tensor=img_input, dropout_rate=0.2)
    base_layers = Model(img_input, base_layers)
    base_layers.load_weights(
for corrida in range(1, 4):
    if network == "Xception":
        print(network)
        train, test, lb2, labelsTest = preprocessing(network)
        baseModel = Xception(weights=pretraining, include_top=False,
                             input_tensor=Input(shape=(299, 299, 3)), pooling="avg")
        headModel = baseModel.output
        headModel = Dense(51, activation='softmax')(headModel)
    elif network == "ResNet152":
        print(network)
        train, test, lb2, labelsTest = preprocessing(network)
        baseModel = ResNet152(weights=pretraining, include_top=False,
                              input_tensor=Input(shape=(224, 224, 3)), pooling="avg")
        headModel = baseModel.output
        headModel = Dense(51, activation='softmax')(headModel)
    elif network == "InceptionV3":
        print(network)
        train, test, lb2, labelsTest = preprocessing(network)
        baseModel = InceptionV3(weights=pretraining, include_top=False,
                                input_tensor=Input(shape=(299, 299, 3)), pooling="avg")
        headModel = baseModel.output
        headModel = Dense(51, activation='softmax')(headModel)
    elif network == "MobileNetV2":
        print(network)
        train, test, lb2, labelsTest = preprocessing(network)
# vgg16.summary()
print("VGG19 number of layers ", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = Xception()
# vgg16.summary()
print("Xception", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = ResNet101()
# vgg16.summary()
print("ResNet101", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = ResNet101V2()
# vgg16.summary()
print("ResNet101V2", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = ResNet152()
# vgg16.summary()
print("ResNet152", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = ResNet50()
# vgg16.summary()
print("ResNet50", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = ResNet50V2()
# vgg16.summary()
print("ResNet50V2", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = NASNetLarge()
# vgg16.summary()
print("NASNetLarge", len(vgg16.trainable_weights) / 2)
def set_resnet(model_class):
    # Build the base network
    base = ResNet152(include_top=False, weights='imagenet')
    net = model_class(base)
    return net
                             include_top=False,
                             input_tensor=img_input)
elif C.network == 'vgg19':
    from src.architectures import vgg19 as nn
    base_layers = VGG19(weights=None, include_top=False, input_tensor=img_input)
elif C.network == 'resnet50':
    from src.architectures import resnet50 as nn
    base_layers = ResNet50(weights=None, include_top=False, input_tensor=img_input)
elif C.network == 'resnet152':
    from src.architectures import resnet152 as nn
    base_layers = ResNet152(weights=None, include_top=False, input_tensor=img_input)

with tf.device(device):
    print('Loading weights from {}'.format(options.weights))
    classifier = nn.classifier(base_layers.output, trainable=False)
    optimizer = Adam(learning_rate=0.0001)
    model = Model(inputs=base_layers.input, outputs=classifier)
    model.load_weights(options.weights)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
                  weights='imagenet')),
    ("MobileNetV2", MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet101", ResNet101(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet101V2", ResNet101V2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet152", ResNet152(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet152V2", ResNet152V2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet50", ResNet50(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("ResNet50V2", ResNet50V2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')),
    ("VGG16", VGG16(input_shape=IMG_SHAPE,
def get_arch(arg, input_shape, classes, **kwargs):
    input_tensor = Input(shape=input_shape)
    if "Normalization" in kwargs:
        if kwargs["Normalization"] == "BatchNormalization":
            kwargs["Normalization"] = BatchNormalization
        elif kwargs["Normalization"] == "LayerNormalization":
            kwargs["Normalization"] = LayerNormalization
        elif kwargs["Normalization"] == "NoNormalization":
            kwargs["Normalization"] = NoNormalization
        # any other string: warn and fall back to no normalization;
        # if it's not a string, assume it is already a normalization layer
        elif type(kwargs["Normalization"]) == str:
            print("Warning: couldn't understand your normalization")
            kwargs["Normalization"] = NoNormalization
    if arg == "AlexNet":
        return AlexNet(input_tensor=input_tensor, classes=classes, **kwargs)
    elif arg == "SmolAlexNet":
        return SmolAlexNet(input_tensor=input_tensor, classes=classes, **kwargs)
    elif arg == "VGG16":
        return VGG16(input_tensor=input_tensor, classes=classes, weights=None, **kwargs)
    elif arg == "VGG19":
        return VGG19(input_tensor=input_tensor, classes=classes, weights=None, **kwargs)
    elif arg == "ResNet50":
        return ResNet50(input_tensor=input_tensor, classes=classes, weights=None, **kwargs)
    elif arg == "ResNet152":
        return ResNet152(input_tensor=input_tensor, classes=classes, weights=None, **kwargs)
    elif arg == "CifarResNet":
        return CifarResNet(3, input_tensor=input_tensor, classes=classes)
    elif arg == "DenseNet169":
        return DenseNet169(input_tensor=input_tensor, classes=classes, weights=None, **kwargs)
    elif arg == "DenseNet121":
        return DenseNet121(input_tensor=input_tensor, classes=classes, weights=None, **kwargs)
    elif arg == "MobileNetV2":
        return MobileNetV2(input_tensor=input_tensor, classes=classes, weights=None, **kwargs)
    elif arg == "DenseNetCifar":
        return DenseNetCifar(input_tensor, classes, 12, 16)
    else:
        show_available()
        raise Exception(arg + " not an available architecture")
def CNN_model(self, learning_rate, epoch, batchsize, whether_Adam, Momentum_gamma, weight_decay, whether_load, cnn_type):
    """
    Resnet model

    :param learning_rate
    :param epoch
    :param batchsize
    :param whether_Adam: whether to use the Adam optimiser; if not, use Momentum
    :param Momentum_gamma: a variable of Momentum
    :param weight_decay: weight decay for Momentum
    :param whether_load: whether to load a trained Resnet model if it exists (otherwise overwrite it)
    :param cnn_type: which pretrained CNN backbone to use
    """
    test_cnn_mfcc = self.train_mfcc
    test_cnn_label = self.train_label

    if isfile("model/resnet_label.hdf5") and whether_load:
        self.cnn_model = load_model("model/resnet_label.hdf5")
    else:
        train_cnn_mfcc = self.test_mfcc
        train_cnn_label = self.test_label
        val_cnn_mfcc = self.validate_mfcc
        val_cnn_label = self.validate_label

        # input
        input = Input(shape=(self.test_mfcc.shape[1], self.test_mfcc.shape[2], 1))

        # Concatenate along the last dimension to get three channels, to fit the input required by ResNet50
        input_concate = Concatenate()([input, input, input])

        # CNN series network (VGG+Resnet)
        # reference: https://keras.io/api/applications/
        if cnn_type == 'ResNet50':
            from tensorflow.keras.applications import ResNet50
            cnn_output = ResNet50(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet101':
            from tensorflow.keras.applications import ResNet101
            cnn_output = ResNet101(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet152':
            from tensorflow.keras.applications import ResNet152
            cnn_output = ResNet152(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet50V2':
            from tensorflow.keras.applications import ResNet50V2
            cnn_output = ResNet50V2(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet101V2':
            from tensorflow.keras.applications import ResNet101V2
            cnn_output = ResNet101V2(pooling='avg')(input_concate)
        elif cnn_type == 'ResNet152V2':
            from tensorflow.keras.applications import ResNet152V2
            cnn_output = ResNet152V2(pooling='avg')(input_concate)
        elif cnn_type == 'VGG16':
            # width and height should not be smaller than 32
            from tensorflow.keras.applications import VGG16
            cnn_output = VGG16(include_top=False, pooling='avg')(input_concate)
            cnn_output = Flatten()(cnn_output)
        elif cnn_type == 'VGG19':
            # width and height should not be smaller than 32
            from tensorflow.keras.applications import VGG19
            cnn_output = VGG19(include_top=False, pooling='avg')(input_concate)
            cnn_output = Flatten()(cnn_output)
        else:
            # CNN layers we design
            print("No recognised CNN network. The CNN layers we designed are used instead.")
            # convolution layers
            conv_output1 = Conv2D(filters=32, strides=(1, 1), kernel_size=5, activation='relu')(input)
            # pool_output1 = MaxPool2D(pool_size=(2, 2))(conv_output1)
            conv_output2 = Conv2D(filters=8, strides=(2, 2), kernel_size=4, activation='relu')(conv_output1)
            conv_output2 = Dropout(0.2)(conv_output2)
            conv_output2_batch = BatchNormalization()(conv_output2)
            cnn_output = Flatten()(conv_output2_batch)
            cnn_output = Flatten()(cnn_output)

        # dense with sigmoid
        Dense_sigmoid = Dense(24, activation='sigmoid')(cnn_output)
        Dense_sigmoid = Dropout(0.2)(Dense_sigmoid)

        # dense output
        output = Dense(self.test_label.shape[1], activation='softmax')(Dense_sigmoid)

        # cnn model for label recognition
        self.cnn_model = Model(input, output)

        # optimizer
        if whether_Adam:
            optimizer = optimizers.Adam(lr=learning_rate, beta_1=Momentum_gamma, decay=weight_decay)
        else:
            optimizer = optimizers.SGD(lr=learning_rate, momentum=Momentum_gamma, nesterov=True, decay=weight_decay)

        self.cnn_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['mse', 'accuracy'])

        start = time.time()
        self.history = self.cnn_model.fit(train_cnn_mfcc, train_cnn_label, epochs=epoch, batch_size=batchsize,
                                          validation_data=(val_cnn_mfcc, val_cnn_label))
        self.training_time = time.time() - start
        self.cnn_model.save("model/resnet_label.hdf5")

    # model evaluation
    self.cnn_model.predict(test_cnn_mfcc)
    self.score = self.cnn_model.evaluate(test_cnn_mfcc, test_cnn_label)
    print("test loss: ", self.score[0], ", mse: ", self.score[1], ", accuracy", self.score[2])