def test_DeepFashion(self):
    """Two-headed DeepFashion model on a VGG16 backbone: a 20-way class
    softmax head plus a sigmoid IOU-score regression head."""
    backbone = keras.applications.VGG16(weights=None, include_top=False,
                                        input_shape=(224, 224, 3))
    shared_features = backbone.output
    dropout_rate = 0.5
    output_classes = 20

    # Classification head.
    cls_branch = Flatten()(shared_features)
    cls_branch = Dense(256, activation='tanh')(cls_branch)
    cls_branch = Dropout(dropout_rate)(cls_branch)
    predictions_class = Dense(output_classes, activation='softmax',
                              name='predictions_class')(cls_branch)

    # Regression head: IOU score (two dense/dropout stages, then sigmoid).
    iou_branch = Flatten()(shared_features)
    iou_branch = Dense(256, activation='tanh')(iou_branch)
    iou_branch = Dropout(dropout_rate)(iou_branch)
    iou_branch = Dense(256, activation='tanh')(iou_branch)
    iou_branch = Dropout(dropout_rate)(iou_branch)
    predictions_iou = Dense(1, activation='sigmoid',
                            name='predictions_iou')(iou_branch)

    keras_model = Model(inputs=backbone.input,
                        outputs=[predictions_class, predictions_iou])
    res = run_image(keras_model, self.model_files, img_path, atol=5e-3,
                    target_size=224, compare_perf=True)
    self.assertTrue(*res)
def test_SmileCNN(self):
    """Small sequential CNN for binary smile classification.

    From https://github.com/kylemcdonald/SmileCNN/blob/master/2%20Training.ipynb
    """
    nb_filters, nb_pool, nb_conv, nb_classes = 32, 2, 3, 2

    model = Sequential()
    model.add(Conv2D(nb_filters, (nb_conv, nb_conv), activation='relu',
                     input_shape=(32, 32, 3)))
    model.add(Conv2D(nb_filters, (nb_conv, nb_conv), activation='relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    res = run_image(model, self.model_files, img_path, atol=5e-3,
                    target_size=32)
    self.assertTrue(*res)
def test_pspnet(self):
    """PSPNet-style segmentation head over the keras_segmentation vanilla encoder.

    From https://github.com/divamgupta/image-segmentation-keras/models/pspnet.py
    """
    from keras_segmentation.models.basic_models import vanilla_encoder

    img_input, levels = vanilla_encoder(input_height=384, input_width=576)
    o = levels[4]

    IMAGE_ORDERING = 'channels_last'
    # Concatenation axis follows the data format (channels_last -> last axis).
    if IMAGE_ORDERING == 'channels_first':
        MERGE_AXIS = 1
    elif IMAGE_ORDERING == 'channels_last':
        MERGE_AXIS = -1

    # Pyramid pooling: pool the top feature map at several factors and
    # concatenate all pooled maps with the original.
    pool_outs = [o]
    for p in [1, 2, 3, 6]:
        pool_outs.append(self._pool_block(o, p, IMAGE_ORDERING))
    o = Concatenate(axis=MERGE_AXIS)(pool_outs)

    o = Conv2D(512, (1, 1), data_format=IMAGE_ORDERING, use_bias=False)(o)
    o = BatchNormalization()(o)
    o = Activation('relu')(o)
    o = Conv2D(101, (3, 3), data_format=IMAGE_ORDERING, padding='same')(o)
    o = keras_segmentation.models.model_utils.resize_image(
        o, (8, 8), data_format=IMAGE_ORDERING)

    model = keras_segmentation.models.model_utils.get_segmentation_model(
        img_input, o)
    model.model_name = "pspnet"
    res = run_image(model, self.model_files, img_path, target_size=(384, 576))
    self.assertTrue(*res)
def test_ExpantionSuperResolution(self):
    """Expansion super-resolution net: one conv, three parallel convs of
    different kernel sizes averaged together, then an output conv.

    NOTE(review): layer names 'lavel1_*' appear to be typos for 'level1_*',
    but they are runtime identifiers and are kept as-is.
    """
    init = Input(shape=(32, 32, 3))
    x = Convolution2D(64, (9, 9), activation='relu', padding='same',
                      name='level1')(init)
    # Three parallel branches with increasing receptive fields.
    branch_1 = Convolution2D(32, (1, 1), activation='relu', padding='same',
                             name='lavel1_1')(x)
    branch_3 = Convolution2D(32, (3, 3), activation='relu', padding='same',
                             name='lavel1_2')(x)
    branch_5 = Convolution2D(32, (5, 5), activation='relu', padding='same',
                             name='lavel1_3')(x)
    merged = Average()([branch_1, branch_3, branch_5])
    out = Convolution2D(3, (5, 5), activation='relu', padding='same',
                        name='output')(merged)

    model = keras.models.Model(init, out)
    res = run_image(model, self.model_files, img_path, atol=5e-3,
                    target_size=32)
    self.assertTrue(*res)
def test_SEResNext(self):
    """SE-ResNeXt built via create_res_next with SE bottleneck blocks.

    Bug fix: the original wrote ``weight_decay = 5e-4,`` — the trailing
    comma made it a 1-tuple ``(5e-4,)`` rather than the intended scalar,
    which is what gets passed down to create_res_next.
    """
    K.clear_session()
    input_shape = (112, 112, 3)
    depth = 29
    cardinality = 8
    width = 64
    weight_decay = 5e-4  # scalar, not a tuple (original had a stray comma)
    include_top = True
    pooling = None
    classes = 10

    img_input = keras.layers.Input(shape=input_shape)
    x = create_res_next(initial_conv_block_inception, se_bottleneck_block,
                        classes, img_input, include_top, depth, cardinality,
                        width, weight_decay, pooling)
    keras_model = Model(img_input, x, name='se_resnext')
    res = run_image(keras_model, self.model_files, img_path, atol=5e-3,
                    target_size=112, tf_v2=True)
    self.assertTrue(*res)
def test_fcn(self):
    """FCN-8 segmentation model with 101 classes.

    From https://github.com/divamgupta/image-segmentation-keras/models/fcn.py
    """
    fcn_model = keras_segmentation.models.fcn.fcn_8(101)
    outcome = run_image(fcn_model, self.model_files, img_path,
                        target_size=(416, 608))
    self.assertTrue(*outcome)
def test_mobilenet_segnet(self):
    """MobileNet-backbone SegNet with 101 classes.

    From https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/segnet.py
    """
    segnet_model = keras_segmentation.models.segnet.mobilenet_segnet(101)
    outcome = run_image(segnet_model, self.model_files, img_path,
                        target_size=(224, 224))
    self.assertTrue(*outcome)
def test_unet_3(self):
    """Noise2Noise U-Net variant (3 output channels, no transposed convs).

    From https://github.com/yu4u/noise2noise/blob/master/model.py
    """
    unet = get_unet_model(out_ch=3, upconv=False)
    outcome = run_image(unet, self.model_files, img_path,
                        target_size=(256, 256, 3))
    self.assertTrue(*outcome)
def test_densenet(self):
    """DenseNet-121 from the titu1994 implementation bundled in model_source.

    From https://github.com/titu1994/DenseNet/blob/master/densenet.py
    NOTE(review): another ``test_densenet`` exists in this file — if both
    live in the same class, the later definition shadows this one; confirm.
    """
    sys.path.insert(0, os.path.join(dirname(abspath(__file__)),
                                    '../model_source/densenet_1/'))
    import densenet_1

    image_dim = (224, 224, 3)
    densenet_model = densenet_1.DenseNetImageNet121(input_shape=image_dim)
    outcome = run_image(densenet_model, self.model_files, img_path,
                        target_size=(224, 224))
    self.assertTrue(*outcome)
def test_custom(self):
    """EfficientNetB0 backbone truncated at 'top_activation' (tf.keras variant)."""
    from efficientnet import tfkeras as efn

    keras.backend.set_learning_phase(0)  # inference mode
    base_model = efn.EfficientNetB0(input_shape=(600, 600, 3), weights=None)
    backbone = keras.Model(base_model.input,
                           base_model.get_layer("top_activation").output)
    outcome = run_image(backbone, self.model_files, img_path,
                        target_size=(600, 600), rtol=1e-2, atol=1e-1,
                        tf_v2=True)
    self.assertTrue(*outcome)
def test_Xception(self):
    """Xception with ImageNet weights (standalone-keras import path).

    NOTE(review): another ``test_Xception`` exists in this file — if both
    live in the same class, the later definition shadows this one; confirm.
    """
    from keras.applications.xception import Xception

    xception_model = Xception(include_top=True, weights='imagenet')
    outcome = run_image(xception_model, self.model_files, img_path,
                        atol=5e-3, target_size=299)
    self.assertTrue(*outcome)
def test_Xception(self):
    """Xception without pretrained weights, converted in tf_v2 mode."""
    xception_ctor = keras.applications.xception.Xception
    xception_model = xception_ctor(include_top=True, weights=None)
    outcome = run_image(xception_model, self.model_files, img_path,
                        atol=5e-3, target_size=299, tf_v2=True)
    self.assertTrue(*outcome)
def test_efn(self):
    """EfficientNetB7 with ImageNet weights (standalone-keras efficientnet)."""
    from efficientnet import keras as efn

    keras.backend.set_learning_phase(0)  # inference mode
    efn_model = efn.EfficientNetB7(weights='imagenet')
    outcome = run_image(efn_model, self.model_files, img_path,
                        target_size=(600, 600), rtol=1e-1)
    self.assertTrue(*outcome)
def test_InceptionV3(self):
    """InceptionV3 conversion check in tf_v2 mode.

    Fix: removed the stray ``model.save('inception.h5')`` — it wrote a
    debug artifact into the working directory, the saved file was never
    read afterwards, and no other test here saves its model.
    """
    keras.backend.set_learning_phase(0)  # inference mode
    InceptionV3 = keras.applications.inception_v3.InceptionV3
    model = InceptionV3(include_top=True)
    res = run_image(model, self.model_files, img_path, target_size=299,
                    tf_v2=True)
    self.assertTrue(*res)
def test_densenet(self):
    """Small custom DenseNet from the tdeboissiere implementation.

    From https://github.com/tdeboissiere/DeepLearningImplementations/blob/master/DenseNet/densenet.py
    """
    sys.path.insert(0, os.path.join(dirname(abspath(__file__)),
                                    '../model_source/densenet_2/'))
    import densenet_2

    densenet_model = densenet_2.DenseNet(20, (224, 224, 3), 4, 1, 1,
                                         nb_filter=10)
    outcome = run_image(densenet_model, self.model_files, img_path,
                        target_size=(224, 224))
    self.assertTrue(*outcome)
def test_unet_plus_plus(self):
    """U-Net++-style decoder over a VGG16 encoder with transpose-conv
    upsampling blocks and VGG skip connections."""
    backbone = VGG16(input_shape=(None, None, 3), input_tensor=None,
                     weights=None,  # 'imagenet' disabled in original
                     include_top=False)
    model_input = backbone.input  # renamed from 'input' (shadowed builtin)
    x = backbone.output

    # Decoder block selection: 'transpose' is what actually takes effect.
    block_type = 'transpose'
    if block_type == 'transpose':
        up_block = Transpose2D_block
    else:
        up_block = Upsample2D_block

    # Map VGG skip-connection layer names to layer indices.
    skip_connection_layers = ('block5_conv3', 'block4_conv3', 'block3_conv3',
                              'block2_conv2', 'block1_conv2')
    skip_connection_idx = ([get_layer_number(backbone, l)
                            if isinstance(l, str) else l
                            for l in skip_connection_layers])

    n_upsample_blocks = 5
    upsample_rates = (2, 2, 2, 2, 2)
    decoder_filters = (256, 128, 64, 32, 16)
    # NOTE(review): this reassignment is dead — up_block was already chosen
    # above from 'transpose'; kept as-is to preserve behavior. Confirm intent.
    block_type = 'upsampling'
    activation = 'sigmoid'
    use_batchnorm = True
    classes = 1

    for i in range(n_upsample_blocks):
        # Attach the matching skip connection when one exists for this level.
        skip_connection = None
        if i < len(skip_connection_idx):
            skip_connection = backbone.layers[skip_connection_idx[i]].output
        upsample_rate = to_tuple(upsample_rates[i])
        x = up_block(decoder_filters[i], i, upsample_rate=upsample_rate,
                     skip=skip_connection, use_batchnorm=use_batchnorm)(x)

    x = Conv2D(classes, (3, 3), padding='same', name='final_conv')(x)
    x = Activation(activation, name=activation)(x)
    model = Model(model_input, x)
    res = run_image(model, self.model_files, img_path,
                    target_size=(256, 256, 3))
    self.assertTrue(*res)
def test_FaceMaskDetection(self):
    """Face-mask classifier: MobileNetV2 backbone plus a small dense head
    ending in a 2-way softmax."""
    mobilenet_v2 = keras.applications.mobilenet_v2
    base = mobilenet_v2.MobileNetV2(weights=None, include_top=False,
                                    input_tensor=Input(shape=(224, 224, 3)))
    head = AveragePooling2D(pool_size=(7, 7))(base.output)
    head = Flatten(name="flatten")(head)
    head = Dense(128, activation="relu")(head)
    head = Dropout(0.5)(head)
    head = Dense(2, activation="softmax")(head)

    classifier = Model(inputs=base.input, outputs=head)
    outcome = run_image(classifier, self.model_files, img_path)
    self.assertTrue(*outcome)
def test_unet_2(self):
    """Classic U-Net for 96x96 grayscale ultrasound nerve segmentation.

    From https://github.com/jocicmarko/ultrasound-nerve-segmentation
    """
    img_rows = 96
    img_cols = 96
    inputs = Input((img_rows, img_cols, 1))

    # Contracting path: conv-conv-pool at 32/64/128/256 filters.
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    # Bottleneck.
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

    # Expanding path: transpose-conv upsample, concat skip, conv-conv.
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2),
                                       padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2),
                                       padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2),
                                       padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2),
                                       padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    # 1x1 sigmoid conv produces the per-pixel mask.
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    res = run_image(model, self.model_files, img_path,
                    color_mode="grayscale", target_size=(img_rows, img_cols))
    self.assertTrue(*res)
def test_gender_detection(self):
    """Sequential conv/batchnorm classifier ending in an 80-unit sigmoid layer."""
    chan_dim = -1  # channels-last batchnorm axis
    model = Sequential()

    # Stage 1: 32 filters, 3x3 pooling.
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=(224, 224, 3)))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Dropout(0.25))

    # Stage 2: two 64-filter conv blocks, 2x2 pooling.
    for _ in range(2):
        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Stage 3: two 128-filter conv blocks, 2x2 pooling.
    for _ in range(2):
        model.add(Conv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Dense head.
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(80))
    model.add(Activation("sigmoid"))

    outcome = run_image(model, self.model_files, img_path, atol=5e-3,
                        target_size=224)
    self.assertTrue(*outcome)
def test_NASNetMobile(self):
    """NASNetMobile without pretrained weights, tf_v2 conversion mode."""
    nasnet_model = keras.applications.nasnet.NASNetMobile(weights=None)
    outcome = run_image(nasnet_model, self.model_files, img_path, tf_v2=True)
    self.assertTrue(*outcome)
def test_DenseNet121(self):
    """DenseNet121 with ImageNet weights (standalone-keras import path)."""
    from keras.applications.densenet import DenseNet121

    densenet_model = DenseNet121(include_top=True, weights='imagenet')
    outcome = run_image(densenet_model, self.model_files, img_path)
    self.assertTrue(*outcome)
def test_InceptionV3(self):
    """InceptionV3 with ImageNet weights (standalone-keras import path)."""
    from keras.applications.inception_v3 import InceptionV3

    inception_model = InceptionV3(include_top=True, weights='imagenet')
    outcome = run_image(inception_model, self.model_files, img_path,
                        target_size=299)
    self.assertTrue(*outcome)
def test_ResNet50(self):
    """ResNet50 with ImageNet weights (standalone-keras import path)."""
    from keras.applications.resnet50 import ResNet50

    resnet_model = ResNet50(include_top=True, weights='imagenet')
    outcome = run_image(resnet_model, self.model_files, img_path)
    self.assertTrue(*outcome)
def test_MobileNetV2(self):
    """MobileNetV2 with ImageNet weights."""
    mnv2_model = keras.applications.mobilenet_v2.MobileNetV2(
        weights='imagenet')
    outcome = run_image(mnv2_model, self.model_files, img_path)
    self.assertTrue(*outcome)
def test_efn(self):
    """EfficientNetB0 without weights (tf.keras efficientnet), tf_v2 mode."""
    from efficientnet import tfkeras as efn

    keras.backend.set_learning_phase(0)  # inference mode
    efn_model = efn.EfficientNetB0(weights=None)
    outcome = run_image(efn_model, self.model_files, img_path,
                        target_size=(224, 224), rtol=1e-2, tf_v2=True)
    self.assertTrue(*outcome)
def test_ResNet50(self):
    """ResNet50V2 without pretrained weights, tf_v2 conversion mode."""
    resnet_ctor = keras.applications.resnet_v2.ResNet50V2
    resnet_model = resnet_ctor(include_top=True, weights=None)
    outcome = run_image(resnet_model, self.model_files, img_path, tf_v2=True)
    self.assertTrue(*outcome)
def test_InceptionResNetV2(self):
    """InceptionResNetV2 with default weights, tf_v2 conversion mode."""
    irv2_ctor = keras.applications.inception_resnet_v2.InceptionResNetV2
    irv2_model = irv2_ctor(include_top=True)
    outcome = run_image(irv2_model, self.model_files, img_path,
                        target_size=299, tf_v2=True)
    self.assertTrue(*outcome)
def test_MobileNetV2(self):
    """MobileNetV2 without pretrained weights, tf_v2 conversion mode."""
    mnv2_ctor = keras.applications.mobilenet_v2.MobileNetV2
    mnv2_model = mnv2_ctor(weights=None)
    outcome = run_image(mnv2_model, self.model_files, img_path, tf_v2=True)
    self.assertTrue(*outcome)
def test_DenseNet121(self):
    """DenseNet121 without pretrained weights, tf_v2 conversion mode."""
    densenet_ctor = keras.applications.densenet.DenseNet121
    densenet_model = densenet_ctor(include_top=True, weights=None)
    outcome = run_image(densenet_model, self.model_files, img_path,
                        tf_v2=True)
    self.assertTrue(*outcome)
def test_MobileNet(self):
    """MobileNet (v1) without pretrained weights."""
    mobilenet_ctor = keras.applications.mobilenet.MobileNet
    mobilenet_model = mobilenet_ctor(weights=None)
    outcome = run_image(mobilenet_model, self.model_files, img_path)
    self.assertTrue(*outcome)