def get_model():
    input_shape = (2, 224, 224, 3)
    input_pair = Input(input_shape, name='image_pair_input')

    lft_layer = Lambda(lambda x: x[:, 0, ...], name='siamese_branch_1')
    rgt_layer = Lambda(lambda x: x[:, 1, ...], name='siamese_branch_2')
    lft_input = lft_layer(input_pair)
    rgt_input = rgt_layer(input_pair)

    # get pretrained model from Keras applications and freeze weights
    encoder = densenet.DenseNet121(input_shape=(224, 224, 3),
                                   include_top=False,
                                   weights='imagenet')
    for layer in encoder.layers:
        layer.trainable = False
    layer.trainable = True  # unfreeze last layer

    fine_tune = get_trainable_encoder()

    # build the two "siamese" networks
    lft_encoded = fine_tune(encoder(lft_input))
    rgt_encoded = fine_tune(encoder(rgt_input))

    l1_layer = Lambda(lambda xs: K.abs(xs[0] - xs[1]), name='discriminator_input')
    l1_distance = l1_layer([lft_encoded, rgt_encoded])
    output = Dense(1, activation='hard_sigmoid', name='depth_prediction')(l1_distance)

    model = Model(inputs=input_pair, outputs=output, name='siamese_network')
    model.compile(loss='mean_squared_error', optimizer=Adam())
    return model
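
# A minimal sketch of feeding the pair-input model above, assuming
# get_trainable_encoder() and the Keras imports are defined elsewhere in the
# original script; img_a and img_b are placeholders for two preprocessed
# (224, 224, 3) arrays stacked along a new "pair" axis.
import numpy as np

pair_model = get_model()
pair = np.stack([img_a, img_b], axis=0)    # shape (2, 224, 224, 3)
batch = np.expand_dims(pair, axis=0)       # shape (1, 2, 224, 224, 3)
score = pair_model.predict(batch)
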
def build_model():
    base_model = densenet.DenseNet121(
        input_shape=(224, 224, 3),
        weights='./full-keras-pretrained-no-top'
                '/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
        include_top=False,
        pooling='avg')
    for layer in base_model.layers:
        layer.trainable = True

    x = base_model.output
    x = Dense(1000,
              kernel_regularizer=regularizers.l1_l2(0.01),
              activity_regularizer=regularizers.l2(0.01))(x)
    x = Activation('relu')(x)
    x = Dense(500,
              kernel_regularizer=regularizers.l1_l2(0.01),
              activity_regularizer=regularizers.l2(0.01))(x)
    x = Activation('relu')(x)
    predictions = Dense(len(class_names), activation='softmax')(x)

    created_model = Model(inputs=base_model.input, outputs=predictions)
    return created_model
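
# A minimal sketch of compiling and training build_model(); class_names and the
# train/validation generators are assumed to be defined elsewhere, and the
# optimizer, loss, and epoch count below are illustrative choices only.
model = build_model()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit_generator(train_generator, validation_data=valid_generator, epochs=10)
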
def get_model(model_name):
    if model_name == "vgg16":
        return (vgg16.VGG16(weights='imagenet'),
                vgg16.decode_predictions, vgg16.preprocess_input)
    elif model_name == "vgg19":
        return (vgg19.VGG19(weights='imagenet'),
                vgg19.decode_predictions, vgg19.preprocess_input)
    elif model_name == "resnet50":
        return (resnet50.ResNet50(weights='imagenet'),
                resnet50.decode_predictions, resnet50.preprocess_input)
    elif model_name == "resnet101":
        return (ResNet101(weights='imagenet'),
                resnet.decode_predictions, resnet.preprocess_input)
    elif model_name == "mobilenet":
        return (mobilenet.MobileNet(weights='imagenet'),
                mobilenet.decode_predictions, mobilenet.preprocess_input)
    elif model_name == "densenet":
        return (densenet.DenseNet121(weights='imagenet'),
                densenet.decode_predictions, densenet.preprocess_input)
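
# A minimal usage sketch for get_model(); the image path is a placeholder and
# the 224x224 target size assumes one of the 224-input backbones above.
import numpy as np
from keras.preprocessing import image

model, decode, preprocess = get_model("densenet")
img = image.load_img("example.jpg", target_size=(224, 224))
x = preprocess(np.expand_dims(image.img_to_array(img), axis=0))
print(decode(model.predict(x), top=5))
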
def DenseNet121(input_shape, num_output, **kwargs):
    return densenet.DenseNet121(input_shape=input_shape,
                                classes=num_output,
                                weights=None,
                                **kwargs)
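
# A minimal sketch of using the wrapper above to build an untrained
# (weights=None) DenseNet121; the 10-class output and the optimizer/loss
# choices are assumptions for illustration.
model = DenseNet121(input_shape=(224, 224, 3), num_output=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
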
test_datagen = ImageDataGenerator(rescale=1. / 255.)
test_generator = test_datagen.flow_from_dataframe(
    dataframe=testdf,
    directory="D:/My files/courses/108-1/Machine learning & DL/Final project/Task2/test_img/",
    x_col="image",
    y_col=None,
    batch_size=1,
    seed=42,
    shuffle=False,
    class_mode=None,
    target_size=(224, 224))

number_of_classes = 2

# base_model = densenet.DenseNet121(weights='densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False)
base_model = densenet.DenseNet121(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(512, activation='relu')(x)
x = Dense(512, activation='relu')(x)
x = Dense(256, activation='relu')(x)
preds = Dense(number_of_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=preds)

# Print the updated layer names.
# for i, layer in enumerate(model.layers): print(i, layer.name)

# Set the first n_freeze layers of the network to be non-trainable.
n_freeze = 300
for layer in model.layers[:n_freeze]:
    layer.trainable = False
for layer in model.layers[n_freeze:]:
    layer.trainable = True
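
# A minimal sketch of running inference with test_generator; the checkpoint
# name is hypothetical, and predict_generator assumes classic Keras (newer
# tf.keras accepts the generator directly in model.predict).
import numpy as np

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.load_weights("task2_densenet121.h5")  # hypothetical checkpoint
test_generator.reset()
pred = model.predict_generator(test_generator, steps=len(test_generator), verbose=1)
predicted_class_indices = np.argmax(pred, axis=1)
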
        model = load_model(model_path)
    except OSError:
        sys.exit("Model is not trained yet! Run train_model.py first.")
elif data_set == "ILSVRC":
    # change here for the data path to ILSVRC2017
    data_path = "../data/ILSVRC/Data/DET/test/"
    ks = 4.0
    # load the model
    if model_name == "MobileNetV2":
        model = mobilenet_v2.MobileNetV2(weights='imagenet')
        input_shape = 224
        preprocess = mobilenet_v2.preprocess_input
    elif model_name == "DenseNet121":
        model = densenet.DenseNet121(weights='imagenet')
        input_shape = 224
        preprocess = densenet.preprocess_input
    elif model_name == "InceptionV3":
        model = inception_v3.InceptionV3(weights='imagenet')
        input_shape = 299
        preprocess = inception_v3.preprocess_input

# replacement (None -> mean replacement, otherwise color)
hide_color = None

# results from the experiment will be saved here
result_path = "results/" + data_set + "/" + model_name + "/approx/"
mkdir(result_path)

# segmentation algorithm (default kernel_size = 4, max_dist = 200, ratio = 0.2)
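
# A minimal sketch of running the selected backbone on a single test image;
# the file name is a placeholder and the keras.preprocessing image utilities
# are assumed to be available in this script.
import numpy as np
from keras.preprocessing import image

img = image.load_img(data_path + "example.JPEG", target_size=(input_shape, input_shape))
x = preprocess(np.expand_dims(image.img_to_array(img), axis=0))
probs = model.predict(x)
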
def get_siamese_model(name=None,
                      input_shape=(224, 224, 3),
                      embedding_vec_size=512,
                      not_freeze_last=2):
    """ Model architecture """
    if name == "InceptionV3":
        base_model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
        model_preprocess_input = inception_v3.preprocess_input
    if name == "InceptionResNetV2":
        base_model = inception_resnet_v2.InceptionResNetV2(weights='imagenet', include_top=False)
        model_preprocess_input = inception_resnet_v2.preprocess_input
    if name == "DenseNet121":
        base_model = densenet.DenseNet121(weights='imagenet', include_top=False)
        model_preprocess_input = densenet.preprocess_input
    if name == "DenseNet169":
        base_model = densenet.DenseNet169(weights='imagenet', include_top=False)
        model_preprocess_input = densenet.preprocess_input
    if name == "DenseNet201":
        base_model = densenet.DenseNet201(weights='imagenet', include_top=False)
        model_preprocess_input = densenet.preprocess_input
    if name == "MobileNetV2":
        base_model = mobilenet_v2.MobileNetV2(weights='imagenet', include_top=False)
        model_preprocess_input = mobilenet_v2.preprocess_input
    if name == "MobileNet":
        base_model = mobilenet.MobileNet(weights='imagenet', include_top=False)
        model_preprocess_input = mobilenet.preprocess_input
    if name == "ResNet50":
        base_model = resnet50.ResNet50(weights='imagenet', include_top=False)
        model_preprocess_input = resnet50.preprocess_input
    if name == "VGG16":
        base_model = vgg16.VGG16(weights='imagenet', include_top=False)
        model_preprocess_input = vgg16.preprocess_input
    if name == "VGG19":
        base_model = vgg19.VGG19(weights='imagenet', include_top=False)
        model_preprocess_input = vgg19.preprocess_input
    if name == "Xception":
        base_model = xception.Xception(weights='imagenet', include_top=False)
        model_preprocess_input = xception.preprocess_input

    # Check whether a base_model was created; otherwise return the list of supported names.
    if 'base_model' not in locals():
        return ["InceptionV3", "InceptionResNetV2", "DenseNet121", "DenseNet169",
                "DenseNet201", "MobileNetV2", "MobileNet", "ResNet50",
                "VGG16", "VGG19", "Xception"]

    # Disable training for all but the last `not_freeze_last` layers.
    for layer in base_model.layers[:-not_freeze_last]:
        layer.trainable = False

    x = base_model.layers[-1].output
    x = GlobalAveragePooling2D()(x)
    x = Dense(
        embedding_vec_size,
        activation='linear',  # sigmoid? relu?
        name='embedding',
        use_bias=False
    )(x)

    model = Model(inputs=base_model.input, outputs=x)

    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Add a customized layer to compute the absolute difference between the encodings
    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    # Add a dense layer with a sigmoid unit to generate the similarity score
    prediction = Dense(
        1,
        activation=Activation(gaussian),
        use_bias=False,
        kernel_constraint=NonNeg()
    )(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(
        inputs=[left_input, right_input],
        outputs=prediction
    )

    return {
        "model": siamese_net,
        "preprocess_input": model_preprocess_input
    }
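
# A minimal usage sketch for get_siamese_model(); the pair arrays and binary
# similarity labels are placeholders, and binary cross-entropy is an assumed
# loss choice (the snippet above does not compile the model itself).
nets = get_siamese_model(name="DenseNet121")
siamese_net = nets["model"]
pair_preprocess = nets["preprocess_input"]

siamese_net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# left_images, right_images: (n, 224, 224, 3) arrays already passed through pair_preprocess
# labels: 1 for matching pairs, 0 otherwise
# siamese_net.fit([left_images, right_images], labels, batch_size=16, epochs=5)
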
def set_model(self, model_name, top_n=5):
    if model_name == 'densenet':
        self.model = densenet.DenseNet121(include_top=True, weights='imagenet',
                                          input_tensor=None, input_shape=None,
                                          pooling=None, classes=1000)
        self.target_size = (224, 224)
        self.decoder = lambda x: densenet.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1608.06993' target='_blank'>
        Densely Connected Convolutional Networks</a> (CVPR 2017 Best Paper Award)</li>
        </ul>"""
    elif model_name == 'inception_resnet_v2':
        self.model = inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet',
                                                           input_tensor=None, input_shape=None,
                                                           pooling=None, classes=1000)
        self.target_size = (299, 299)
        self.decoder = lambda x: inception_resnet_v2.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1602.07261' target='_blank'>
        Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</a></li>
        </ul>"""
    elif model_name == 'inception_v3':
        self.model = inception_v3.InceptionV3(include_top=True, weights='imagenet',
                                              input_tensor=None, input_shape=None,
                                              pooling=None, classes=1000)
        self.target_size = (299, 299)
        self.decoder = lambda x: inception_v3.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1512.00567' target='_blank'>
        Rethinking the Inception Architecture for Computer Vision</a></li>
        </ul>"""
    elif model_name == 'mobilenet':
        self.model = mobilenet.MobileNet(input_shape=None, alpha=1.0,
                                         depth_multiplier=1, dropout=1e-3,
                                         include_top=True, weights='imagenet',
                                         input_tensor=None, pooling=None, classes=1000)
        self.target_size = (224, 224)
        self.decoder = lambda x: mobilenet.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1704.04861' target='_blank'>
        MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications</a></li>
        </ul>"""
    elif model_name == 'mobilenet_v2':
        self.model = mobilenet_v2.MobileNetV2(input_shape=None, alpha=1.0,
                                              include_top=True, weights='imagenet',
                                              input_tensor=None, pooling=None, classes=1000)
        self.target_size = (224, 224)
        self.decoder = lambda x: mobilenet_v2.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1801.04381' target='_blank'>
        MobileNetV2: Inverted Residuals and Linear Bottlenecks</a></li>
        </ul>"""
    elif model_name == 'nasnet':
        self.model = nasnet.NASNetLarge(input_shape=None, include_top=True,
                                        weights='imagenet', input_tensor=None,
                                        pooling=None, classes=1000)
        self.target_size = (331, 331)  # NASNetLarge's ImageNet top expects 331x331 inputs
        self.decoder = lambda x: nasnet.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1707.07012' target='_blank'>
        Learning Transferable Architectures for Scalable Image Recognition</a></li>
        </ul>"""
    elif model_name == 'resnet50':
        self.model = resnet50.ResNet50(include_top=True, weights='imagenet',
                                       input_tensor=None, input_shape=None,
                                       pooling=None, classes=1000)
        self.target_size = (224, 224)
        self.decoder = lambda x: resnet50.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li>ResNet: <a href='https://arxiv.org/abs/1512.03385' target='_blank'>
        Deep Residual Learning for Image Recognition</a></li>
        </ul>"""
    elif model_name == 'vgg16':
        self.model = vgg16.VGG16(include_top=True, weights='imagenet',
                                 input_tensor=None, input_shape=None,
                                 pooling=None, classes=1000)
        self.target_size = (224, 224)
        self.decoder = lambda x: vgg16.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>
        Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
        </ul>"""
    elif model_name == 'vgg19':
        self.model = vgg19.VGG19(include_top=True, weights='imagenet',
                                 input_tensor=None, input_shape=None,
                                 pooling=None, classes=1000)
        self.target_size = (224, 224)
        self.decoder = lambda x: vgg19.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>
        Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
        </ul>"""
    elif model_name == 'xception':
        self.model = xception.Xception(include_top=True, weights='imagenet',
                                       input_tensor=None, input_shape=None,
                                       pooling=None, classes=1000)
        self.target_size = (299, 299)
        self.decoder = lambda x: xception.decode_predictions(x, top=top_n)
        self.ref = """<ul>
        <li><a href='https://arxiv.org/abs/1610.02357' target='_blank'>
        Xception: Deep Learning with Depthwise Separable Convolutions</a></li>
        </ul>"""
    else:
        logger.error('Unknown model name!')
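
# A minimal usage sketch for set_model(); `classifier` stands in for whatever
# object owns this method (its class is not shown in the snippet), and the
# DenseNet preprocessing call is an assumption based on the chosen backbone.
import numpy as np
from keras.preprocessing import image

classifier.set_model('densenet', top_n=3)
img = image.load_img('example.jpg', target_size=classifier.target_size)
x = densenet.preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(classifier.decoder(classifier.model.predict(x)))
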