Example #1
def run_image(model, model_files, img_path, model_name='onnx_conversion', rtol=1.e-3, atol=1.e-5, color_mode="rgb",
              target_size=224):
    preprocess_input = keras.applications.resnet50.preprocess_input
    image = keras.preprocessing.image

    try:
        if not isinstance(target_size, tuple):
            target_size = (target_size, target_size)
        if is_keras_older_than("2.2.3"):
            # color_mode is not supported in older keras versions
            img = image.load_img(img_path, target_size=target_size)
        else:
            img = image.load_img(img_path, color_mode=color_mode, target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        if color_mode == "rgb":
            x = preprocess_input(x)
    except FileNotFoundError:
        return False, 'The image data does not exist.'

    msg = ''
    preds = None
    try:
        preds = model.predict(x)
    except RuntimeError:
        msg = 'keras prediction throws an exception for model ' + model.name + ', skip comparison.'

    onnx_model = keras2onnx.convert_keras(model, model.name)
    res = run_onnx_runtime(model_name, onnx_model, x, preds, model_files, rtol=rtol, atol=atol)
    return res, msg
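A minimal usage sketch for this helper, assuming img_path points to an existing test image and the ResNet50 ImageNet weights can be downloaded; the variable names here are illustrative only:

# Hypothetical usage; img_path is assumed to be defined by the test module.
model_files = []
model = keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet')
ok, msg = run_image(model, model_files, img_path)
assert ok, msg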
Example #2
class TestSGAN(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(is_keras_older_than("2.2.4"),
                     "keras version older than 2.2.4 not supported for SGAN")
    def test_SGAN(self):
        keras_model = SGAN().combined
        x = np.random.rand(5, 100).astype(np.float32)
        expected = keras_model.predict(x)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))
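run_onnx_runtime comes from the shared test utilities and is not shown in these examples; a minimal sketch of the kind of comparison it presumably performs with onnxruntime (the function name and tolerance defaults below are assumptions, not the project's actual helper):

import numpy as np
import onnxruntime

def compare_with_onnxruntime(onnx_model, x, expected, rtol=1.e-3, atol=1.e-5):
    # Load the converted ModelProto into an onnxruntime inference session.
    sess = onnxruntime.InferenceSession(onnx_model.SerializeToString())
    input_name = sess.get_inputs()[0].name
    actual = sess.run(None, {input_name: x})
    # Compare the first ONNX output with the keras prediction.
    return np.allclose(expected, actual[0], rtol=rtol, atol=atol)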
Example #3
class TestDenseNet_1(unittest.TestCase):

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "Cannot import normalize_data_format from keras.backend")
    def test_densenet(self):
        # From https://github.com/titu1994/DenseNet/blob/master/densenet.py
        sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../model_source/densenet_1/'))
        import densenet_1
        image_dim = (224, 224, 3)
        model = densenet_1.DenseNetImageNet121(input_shape=image_dim)
        res = run_image(model, self.model_files, img_path, target_size=(224, 224))
        self.assertTrue(*res)
Example #4
class TestKerasApplications(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_MobileNet(self):
        mobilenet = keras.applications.mobilenet
        model = mobilenet.MobileNet(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "There is no mobilenet_v2 module before keras 2.2.3.")
    def test_MobileNetV2(self):
        mobilenet_v2 = keras.applications.mobilenet_v2
        model = mobilenet_v2.MobileNetV2(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_ResNet50(self):
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_InceptionV3(self):
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path, target_size=299)
        self.assertTrue(*res)

    def test_DenseNet121(self):
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_Xception(self):
        from keras.applications.xception import Xception
        model = Xception(include_top=True, weights='imagenet')
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=299)
        self.assertTrue(*res)

    def test_SmileCNN(self):
        # From https://github.com/kylemcdonald/SmileCNN/blob/master/2%20Training.ipynb
        nb_filters = 32
        nb_pool = 2
        nb_conv = 3
        nb_classes = 2

        model = Sequential()

        model.add(
            Conv2D(nb_filters, (nb_conv, nb_conv),
                   activation='relu',
                   input_shape=(32, 32, 3)))
        model.add(Conv2D(nb_filters, (nb_conv, nb_conv), activation='relu'))
        model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes, activation='softmax'))
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=32)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.4"),
                     "keras-resnet requires keras 2.2.4 or later.")
    def test_keras_resnet_batchnormalization(self):
        N, C, H, W = 2, 3, 120, 120
        import keras_resnet

        model = Sequential()
        model.add(
            ZeroPadding2D(padding=((3, 3), (3, 3)),
                          input_shape=(H, W, C),
                          data_format='channels_last'))
        model.add(
            Conv2D(64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   padding='valid',
                   dilation_rate=(1, 1),
                   use_bias=False,
                   data_format='channels_last'))
        model.add(keras_resnet.layers.BatchNormalization(freeze=True, axis=3))

        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(N, H, W, C).astype(np.float32).reshape(
            (N, H, W, C))
        expected = model.predict(data)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected,
                             self.model_files))

    def test_tcn(self):
        from tcn import TCN
        batch_size, timesteps, input_dim = None, 20, 1
        i = Input(batch_shape=(batch_size, timesteps, input_dim))
        for return_sequences in [True, False]:
            o = TCN(return_sequences=return_sequences)(i)  # The TCN layers are here.
            o = Dense(1)(o)
            model = keras.models.Model(inputs=[i], outputs=[o])
            onnx_model = keras2onnx.convert_keras(model, model.name)
            batch_size = 3
            data = np.random.rand(batch_size, timesteps,
                                  input_dim).astype(np.float32).reshape(
                                      (batch_size, timesteps, input_dim))
            expected = model.predict(data)
            self.assertTrue(
                run_onnx_runtime(onnx_model.graph.name, onnx_model, data,
                                 expected, self.model_files))
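The self.model_files list handed to run_onnx_runtime is presumably populated with any files written during the comparison, which is why tearDown removes them; a minimal sketch of that bookkeeping (the helper name and file name are illustrative only):

import onnx

def save_converted_model(onnx_model, model_files, file_name='converted.onnx'):
    # Write the ModelProto to disk and record the path for cleanup in tearDown.
    onnx.save_model(onnx_model, file_name)
    model_files.append(file_name)
    return file_name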
Example #5
class TestUnet(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_unet_1(self):
        # From https://github.com/divamgupta/image-segmentation-keras/models/unet.py
        model = keras_segmentation.models.unet.unet(101)
        res = run_image(model,
                        self.model_files,
                        img_path,
                        target_size=(416, 608))
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "Cannot import normalize_data_format from keras.backend")
    def test_unet_2(self):
        # From https://github.com/jocicmarko/ultrasound-nerve-segmentation
        img_rows = 96
        img_cols = 96

        inputs = Input((img_rows, img_cols, 1))
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        up6 = concatenate([
            Conv2DTranspose(256, (2, 2), strides=(2, 2),
                            padding='same')(conv5), conv4
        ],
                          axis=3)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

        up7 = concatenate([
            Conv2DTranspose(128, (2, 2), strides=(2, 2),
                            padding='same')(conv6), conv3
        ],
                          axis=3)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

        up8 = concatenate([
            Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7),
            conv2
        ],
                          axis=3)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

        up9 = concatenate([
            Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8),
            conv1
        ],
                          axis=3)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

        conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

        model = Model(inputs=[inputs], outputs=[conv10])
        res = run_image(model,
                        self.model_files,
                        img_path,
                        color_mode="grayscale",
                        target_size=(img_rows, img_cols))
        self.assertTrue(*res)

    @unittest.skipIf(get_maximum_opset_supported() < 14,
                     "Need ConvTranspose-14 support.")
    def test_unet_3(self):
        # From https://github.com/yu4u/noise2noise/blob/master/model.py
        model = get_unet_model(out_ch=3, upconv=False)
        res = run_image(model,
                        self.model_files,
                        img_path,
                        target_size=(256, 256, 3))
        self.assertTrue(*res)
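For test_unet_2, run_image (Example #1) takes the grayscale path and skips preprocess_input; a minimal sketch of the single-channel input tensor it builds, with the shape noted as an assumption following from color_mode="grayscale":

from keras.preprocessing import image
import numpy as np

img = image.load_img(img_path, color_mode="grayscale", target_size=(96, 96))
x = np.expand_dims(image.img_to_array(img), axis=0)  # expected shape: (1, 96, 96, 1)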
Example #6
class TestKerasApplications(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_MobileNet(self):
        mobilenet = keras.applications.mobilenet
        model = mobilenet.MobileNet(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "There is no mobilenet_v2 module before keras 2.2.3.")
    def test_MobileNetV2(self):
        mobilenet_v2 = keras.applications.mobilenet_v2
        model = mobilenet_v2.MobileNetV2(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_ResNet50(self):
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_InceptionV3(self):
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path, target_size=299)
        self.assertTrue(*res)

    def test_DenseNet121(self):
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_Xception(self):
        from keras.applications.xception import Xception
        model = Xception(include_top=True, weights='imagenet')
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=299)
        self.assertTrue(*res)

    def test_SmileCNN(self):
        # From https://github.com/kylemcdonald/SmileCNN/blob/master/2%20Training.ipynb
        nb_filters = 32
        nb_pool = 2
        nb_conv = 3
        nb_classes = 2

        model = Sequential()

        model.add(
            Conv2D(nb_filters, (nb_conv, nb_conv),
                   activation='relu',
                   input_shape=(32, 32, 3)))
        model.add(Conv2D(nb_filters, (nb_conv, nb_conv), activation='relu'))
        model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes, activation='softmax'))
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=32)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.4"),
                     "keras-resnet requires keras 2.2.4 or later.")
    def test_keras_resnet_batchnormalization(self):
        N, C, H, W = 2, 3, 120, 120
        import keras_resnet

        model = Sequential()
        model.add(
            ZeroPadding2D(padding=((3, 3), (3, 3)),
                          input_shape=(H, W, C),
                          data_format='channels_last'))
        model.add(
            Conv2D(64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   padding='valid',
                   dilation_rate=(1, 1),
                   use_bias=False,
                   data_format='channels_last'))
        model.add(keras_resnet.layers.BatchNormalization(freeze=True, axis=3))

        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(N, H, W, C).astype(np.float32).reshape(
            (N, H, W, C))
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # model from https://github.com/titu1994/Image-Super-Resolution
    def test_ExpantionSuperResolution(self):
        init = Input(shape=(32, 32, 3))
        x = Convolution2D(64, (9, 9),
                          activation='relu',
                          padding='same',
                          name='level1')(init)
        x1 = Convolution2D(32, (1, 1),
                           activation='relu',
                           padding='same',
                           name='lavel1_1')(x)
        x2 = Convolution2D(32, (3, 3),
                           activation='relu',
                           padding='same',
                           name='lavel1_2')(x)
        x3 = Convolution2D(32, (5, 5),
                           activation='relu',
                           padding='same',
                           name='lavel1_3')(x)
        x = Average()([x1, x2, x3])
        out = Convolution2D(3, (5, 5),
                            activation='relu',
                            padding='same',
                            name='output')(x)
        model = keras.models.Model(init, out)
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=32)
        self.assertTrue(*res)

    def test_tcn(self):
        from tcn import TCN
        batch_size, timesteps, input_dim = None, 20, 1
        actual_batch_size = 3
        i = Input(batch_shape=(batch_size, timesteps, input_dim))
        np.random.seed(1000)  # fix the random seed to avoid output discrepancies between runs
        for return_sequences in [True, False]:
            o = TCN(return_sequences=return_sequences)(i)  # The TCN layers are here.
            o = Dense(1)(o)
            model = keras.models.Model(inputs=[i], outputs=[o])
            onnx_model = keras2onnx.convert_keras(model, model.name)
            data = np.random.rand(actual_batch_size, timesteps,
                                  input_dim).astype(np.float32).reshape(
                                      (actual_batch_size, timesteps,
                                       input_dim))
            expected = model.predict(data)
            self.assertTrue(
                run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                                  data, expected, self.model_files))

    # model from https://github.com/titu1994/LSTM-FCN
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_lstm_fcn(self):
        MAX_SEQUENCE_LENGTH = 176
        NUM_CELLS = 8
        NB_CLASS = 37
        ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

        x = LSTM(NUM_CELLS)(ip)
        x = Dropout(0.8)(x)

        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        x = concatenate([x, y])

        out = Dense(NB_CLASS, activation='softmax')(x)

        model = Model(ip, out)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        batch_size = 2
        data = np.random.rand(batch_size, 1,
                              MAX_SEQUENCE_LENGTH).astype(np.float32).reshape(
                                  batch_size, 1, MAX_SEQUENCE_LENGTH)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # model from https://github.com/CyberZHG/keras-self-attention
    @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11,
                     "Test level 0 only, and opset 11 or higher is required.")
    def test_keras_self_attention(self):
        from keras_self_attention import SeqSelfAttention
        keras.backend.clear_session()

        model = keras.models.Sequential()
        model.add(
            keras.layers.Embedding(input_dim=10000,
                                   output_dim=300,
                                   mask_zero=True))
        model.add(
            keras.layers.Bidirectional(
                keras.layers.LSTM(units=128, return_sequences=True)))
        model.add(SeqSelfAttention(attention_activation='sigmoid'))
        model.add(keras.layers.Dense(units=5))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(5, 10).astype(np.float32).reshape(5, 10)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # Model from https://github.com/chandrikadeb7/Face-Mask-Detection
    @unittest.skipIf(test_level_0 or is_keras_older_than("2.2.3"),
                     "Test level 0 only, and there is no mobilenet_v2 module before keras 2.2.3.")
    def test_FaceMaskDetection(self):
        mobilenet_v2 = keras.applications.mobilenet_v2
        baseModel = mobilenet_v2.MobileNetV2(weights=None,
                                             include_top=False,
                                             input_tensor=Input(shape=(224,
                                                                       224,
                                                                       3)))
        headModel = baseModel.output
        headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(128, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(2, activation="softmax")(headModel)

        model = Model(inputs=baseModel.input, outputs=headModel)
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    # Model from https://github.com/abhishekrana/DeepFashion
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_DeepFashion(self):
        base_model = keras.applications.VGG16(weights=None,
                                              include_top=False,
                                              input_shape=(224, 224, 3))
        model_inputs = base_model.input
        common_inputs = base_model.output
        dropout_rate = 0.5
        output_classes = 20
        x = Flatten()(common_inputs)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        predictions_class = Dense(output_classes,
                                  activation='softmax',
                                  name='predictions_class')(x)

        ## Model (Regression) IOU score
        x = Flatten()(common_inputs)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        predictions_iou = Dense(1,
                                activation='sigmoid',
                                name='predictions_iou')(x)

        ## Create Model
        keras_model = Model(inputs=model_inputs,
                            outputs=[predictions_class, predictions_iou])
        res = run_image(keras_model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=224,
                        compare_perf=True)
        self.assertTrue(*res)

    # Model from https://github.com/manicman1999/Keras-BiGAN
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_bigan_generator(self):
        def g_block(inp, fil, u=True):

            if u:
                out = UpSampling2D(interpolation='bilinear')(inp)
            else:
                out = Activation('linear')(inp)

            skip = Conv2D(fil,
                          1,
                          padding='same',
                          kernel_initializer='he_normal')(out)

            out = Conv2D(filters=fil,
                         kernel_size=3,
                         padding='same',
                         kernel_initializer='he_normal')(out)
            out = LeakyReLU(0.2)(out)

            out = Conv2D(filters=fil,
                         kernel_size=3,
                         padding='same',
                         kernel_initializer='he_normal')(out)
            out = LeakyReLU(0.2)(out)

            out = Conv2D(fil,
                         1,
                         padding='same',
                         kernel_initializer='he_normal')(out)

            out = keras.layers.add([out, skip])
            out = LeakyReLU(0.2)(out)

            return out

        latent_size = 64
        cha = 16

        inp = Input(shape=[latent_size])

        x = Dense(4 * 4 * 16 * cha, kernel_initializer='he_normal')(inp)
        x = Reshape([4, 4, 16 * cha])(x)

        x = g_block(x, 16 * cha, u=False)  #4
        x = g_block(x, 8 * cha)  #8
        x = g_block(x, 4 * cha)  #16
        x = g_block(x, 3 * cha)  #32
        x = g_block(x, 2 * cha)  #64
        x = g_block(x, 1 * cha)  #128

        x = Conv2D(filters=3,
                   kernel_size=1,
                   activation='sigmoid',
                   padding='same',
                   kernel_initializer='he_normal')(x)

        model = Model(inputs=inp, outputs=x)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(200, latent_size).astype(np.float32).reshape(
            200, latent_size)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # Model from https://github.com/ankur219/ECG-Arrhythmia-classification
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_ecg_classification(self):
        model = Sequential()
        model.add(
            Conv2D(64, (3, 3),
                   strides=(1, 1),
                   input_shape=[128, 128, 3],
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(
            Conv2D(64, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(
            Conv2D(128, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(
            Conv2D(128, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(
            Conv2D(256, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(
            Conv2D(256, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(2048))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(7, activation='softmax'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(2, 128, 128, 3).astype(np.float32)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # Model from https://github.com/arunponnusamy/gender-detection-keras
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_gender_detection(self):
        model = Sequential()
        inputShape = (224, 224, 3)
        chanDim = -1
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        model.add(Dense(80))
        model.add(Activation("sigmoid"))

        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=224)
        self.assertTrue(*res)
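These classes are normally collected by the project's test runner; a minimal entry point for running a single example file directly is the usual unittest idiom, assuming the file is executed as a script:

if __name__ == "__main__":
    unittest.main()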