Example No. 1
def convert(model, name=None, initial_types=None, doc_string='', target_opset=None,
            targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
    """
    :param model: a libsvm model
    :param initial_types: a python list. Each element is a tuple of a variable name and a type defined in data_types.py
    :param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
    :param doc_string: A string attached onto the produced ONNX model
    :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
    :param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
    produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
    :param custom_conversion_functions: a dictionary for specifying the user customized conversion function
    :param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
    :return: An ONNX model (type: ModelProto) which is equivalent to the input libsvm model
    """
    if initial_types is None:
        raise ValueError(
            'Initial types are required. See usage of convert(...) in '
            'onnxmltools.convert.libsvm.convert for details')

    if name is None:
        name = str(uuid4().hex)
    target_opset = target_opset if target_opset else get_maximum_opset_supported()

    # Parse the libsvm model as our internal data structure (i.e., Topology)
    topology = parse_libsvm(model, initial_types, custom_conversion_functions,
                            custom_shape_calculators)

    # Infer variable shapes
    topology.compile()

    # Convert our Topology object into ONNX. The outcome is an ONNX model.
    onnx_model = convert_topology(topology, name, doc_string, target_opset, targeted_onnx)

    return onnx_model
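
A minimal usage sketch for this converter, assuming the libsvm python bindings (svmutil) and the usual onnxmltools import paths; the toy training data is illustrative only:

from svmutil import svm_train  # libsvm python bindings (assumed available)
from onnxmltools.convert.common.data_types import FloatTensorType
from onnxmltools.utils import save_model

# Train a tiny linear toy model: two samples with two dense features each.
toy_model = svm_train([0, 1], [[0.0, 1.0], [1.0, 0.0]], '-t 0')

# initial_types pairs each input name with its tensor type; None marks a dynamic batch dimension.
onnx_model = convert(toy_model, name='libsvm_example',
                     initial_types=[('input', FloatTensorType([None, 2]))])
save_model(onnx_model, 'libsvm_example.onnx')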
Example No. 2
def test_apply():
    oopb = _cmn.onnx_ops.OnnxOperatorBuilder(
        _cmn.OnnxObjectContainer(get_maximum_opset_supported()),
        _cmn.InterimContext('_curr'))
    value = oopb.apply_add(
        (np.array([[1.0], [0.5]], dtype='float32'),
         ('_i1', oopb.float, np.array([2.0], dtype='float32'))), 'add')
    assert value[0].startswith('add')
Example No. 3
    def common_test_xgboost_10_skl(self, missing, replace=False):
        this = os.path.abspath(os.path.dirname(__file__))
        data = os.path.join(this, "data_fail.csv")
        data = pandas.read_csv(data)

        for col in data:
            dtype = data[col].dtype
            if dtype in ['float64', 'float32']:
                data[col].fillna(0., inplace=True)
            elif dtype in ['int64']:
                data[col].fillna(0, inplace=True)
            elif dtype in ['O']:
                data[col].fillna('N/A', inplace=True)

        data['pclass'] = data['pclass'] * float(1)
        full_df = data.drop('survived', axis=1)
        full_labels = data['survived']

        train_df, test_df, train_labels, test_labels = train_test_split(
            full_df, full_labels, test_size=.2, random_state=11)

        col_transformer = self._column_tranformer_fitted_from_df(full_df)

        param_distributions = {
            "colsample_bytree": 0.5,
            "gamma": 0.2,
            'learning_rate': 0.3,
            'max_depth': 2,
            'min_child_weight': 1.,
            'n_estimators': 1,
            'missing': missing,
        }

        regressor = XGBRegressor(verbose=0, objective='reg:squarederror',
                                 **param_distributions)
        regressor.fit(col_transformer.transform(train_df), train_labels)
        model = Pipeline(steps=[('preprocessor', col_transformer),
                                ('regressor', regressor)])

        update_registered_converter(
            XGBRegressor, 'XGBRegressor',
            calculate_linear_regressor_output_shapes,
            convert_xgb)

        # Convert only the last pipeline step (the fitted XGBoost regressor).
        input_xgb = model.steps[0][-1].transform(test_df[:5]).astype(np.float32)
        if replace:
            input_xgb[input_xgb[:, :] == missing] = np.nan
        onnx_last = convert_sklearn(model.steps[1][-1],
                                    initial_types=[('X', FloatTensorType(shape=[None, input_xgb.shape[1]]))],
                                    target_opset=get_maximum_opset_supported())
        session = rt.InferenceSession(onnx_last.SerializeToString())
        pred_skl = model.steps[1][-1].predict(input_xgb).ravel()
        pred_onx = session.run(None, {'X': input_xgb})[0].ravel()
        assert_almost_equal(pred_skl, pred_onx)
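
The helper above is parameterized by the missing-value marker; a hedged sketch of how wrapper tests might invoke it (the method names below are illustrative, not the suite's actual test names):

    def test_xgboost_10_skl_missing_nan(self):  # hypothetical wrapper
        self.common_test_xgboost_10_skl(np.nan)

    def test_xgboost_10_skl_missing_zero(self):  # hypothetical wrapper
        self.common_test_xgboost_10_skl(0.)

    def test_xgboost_10_skl_missing_replace(self):  # hypothetical wrapper
        self.common_test_xgboost_10_skl(np.nan, replace=True)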
Example No. 4
def convert_keras(model, name=None, doc_string='', target_opset=None,
                  channel_first_inputs=None, debug_mode=False, custom_op_conversions=None):
    # type: (keras.Model, str, str, int, [], bool, {}) -> onnx.ModelProto
    """
    :param model: keras model
    :param name: the converted onnx model internal name
    :param doc_string: doc string
    :param target_opset: the targeted onnx model opset
    :param channel_first_inputs: a list of channel-first input names
    :param debug_mode: enable debug logging and try to convert as much of the model as possible
    :param custom_op_conversions: the handler for custom operator conversion
    :return: an ONNX ModelProto
    """
    if isinstance(model, tf.keras.Model) and not is_tf_keras:
        raise Exception("This is a tensorflow keras model, but keras standalone converter is used." +
                        " Please set environment variable TF_KERAS = 1.")

    set_logger_level(logging.DEBUG if debug_mode else logging.INFO)
    if is_tf2:
        from tensorflow.python.eager import context
        k2o_logger().info("tf executing eager_mode: {}".format(context.executing_eagerly()))
        if hasattr(model, 'run_eagerly'):
            k2o_logger().info("tf.keras model eager_mode: {}".format(model.run_eagerly))
    if debug_mode:
        print(model.summary())

    name = name or model.name
    target_opset = target_opset or get_maximum_opset_supported()

    input_names = []
    output_names = []
    output_dict = {}
    if is_tf2 and is_tf_keras:
        tf_graph = build_layer_output_from_model(model, output_dict, input_names, output_names)
    else:
        tf_graph = model.outputs[0].graph if is_tf2 else keras.backend.get_session().graph
        output_dict = build_opdict_from_keras(model)
        output_names = [n.name for n in model.outputs]

    static_set_ke2onnx_converters(set_converter)
    dump_graph_into_tensorboard(tf_graph)
    topology = Topology(model, tf_graph,
                        target_opset=target_opset,
                        custom_op_dict=custom_op_conversions)
    topology.debug_mode = debug_mode
    if (not model.inputs) or (not model.outputs):
        # Since TensorFlow 2.2, subclassed tf.keras models no longer store
        # inputs/outputs info in the model object.
        parse_graph_modeless(topology, tf_graph, target_opset, input_names, output_names, output_dict)
    else:
        parse_graph(topology, tf_graph, target_opset, output_names, output_dict)
    topology.compile()

    return convert_topology(topology, name, doc_string, target_opset, channel_first_inputs)
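
A minimal usage sketch for convert_keras, assuming a tf.keras-compatible build of the converter (e.g., TF_KERAS=1); the two-layer model is illustrative only:

import tensorflow as tf

# Any Sequential/functional Keras model with defined inputs converts the same way.
toy = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(2, activation='softmax'),
])
onnx_model = convert_keras(toy, name='toy_dense')  # target_opset defaults to the maximum supported
with open('toy_dense.onnx', 'wb') as f:
    f.write(onnx_model.SerializeToString())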
Example No. 5
def convert_model(yolo, is_tiny_yolo, target_opset=None):
    if target_opset is None:
        target_opset = get_maximum_opset_supported()

    onnxmodel_1 = convert_keras(yolo.yolo_model,
                                target_opset=target_opset,
                                channel_first_inputs=['input_1'])
    onnxmodel_2 = convert_keras(yolo.evaluation_model,
                                target_opset=target_opset)
    onnxmodel_3 = convert_keras(yolo.nms_model, target_opset=target_opset)
    Graph.opset = target_opset

    if is_tiny_yolo:
        global yolo_model_graph_tiny
        global evaluation_model_graph_tiny
        global nms_model_graph_tiny
        yolo_model_graph_tiny = Graph.load(
            onnxmodel_1,
            inputs=[input_.name for input_ in onnxmodel_1.graph.input
                    ])  # define the order of arguments
        evaluation_model_graph_tiny = Graph.load(
            onnxmodel_2,
            inputs=[input_.name for input_ in onnxmodel_2.graph.input],
            outputs=[output_.name for output_ in onnxmodel_2.graph.output])
        nms_model_graph_tiny = Graph.load(
            onnxmodel_3,
            inputs=[input_.name for input_ in onnxmodel_3.graph.input],
            outputs=[output_.name for output_ in onnxmodel_3.graph.output])

        return combine_model_tiny.oxml
    else:
        global yolo_model_graph
        global evaluation_model_graph
        global nms_model_graph
        yolo_model_graph = Graph.load(
            onnxmodel_1,
            inputs=[input_.name for input_ in onnxmodel_1.graph.input
                    ])  # define the order of arguments
        evaluation_model_graph = Graph.load(
            onnxmodel_2,
            inputs=[input_.name for input_ in onnxmodel_2.graph.input],
            outputs=[output_.name for output_ in onnxmodel_2.graph.output])
        nms_model_graph = Graph.load(
            onnxmodel_3,
            inputs=[input_.name for input_ in onnxmodel_3.graph.input],
            outputs=[output_.name for output_ in onnxmodel_3.graph.output])

        return combine_model.oxml
Example No. 6
class TestMusicGeneration(unittest.TestCase):

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "ScatterNd support need opset >= 10.")
    def test_music_generation(self):
        K.clear_session()
        model, time_model, note_model = build_models()

        batch_size = 2
        data_notes = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES, NOTE_UNITS).astype(np.float32)
        data_beat = np.random.rand(batch_size, SEQ_LEN, NOTES_PER_BAR).astype(np.float32)
        data_style = np.random.rand(batch_size, SEQ_LEN, NUM_STYLES).astype(np.float32)
        data_chosen = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES, NOTE_UNITS).astype(np.float32)

        expected = model.predict([data_notes, data_chosen, data_beat, data_style])
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                              {model.input_names[0]: data_notes,
                               model.input_names[1]: data_chosen,
                               model.input_names[2]: data_beat,
                               model.input_names[3]: data_style}, expected, self.model_files))

        expected = time_model.predict([data_notes, data_beat, data_style])
        onnx_model = keras2onnx.convert_keras(time_model, time_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, time_model,
                              {time_model.input_names[0]: data_notes,
                               time_model.input_names[1]: data_beat,
                               time_model.input_names[2]: data_style}, expected, self.model_files))

        data_notes = np.random.rand(batch_size, 1, NUM_NOTES, TIME_AXIS_UNITS).astype(np.float32)
        data_chosen = np.random.rand(batch_size, 1, NUM_NOTES, NOTE_UNITS).astype(np.float32)
        data_style = np.random.rand(batch_size, 1, NUM_STYLES).astype(np.float32)
        expected = note_model.predict([data_notes, data_chosen, data_style])
        onnx_model = keras2onnx.convert_keras(note_model, note_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, note_model,
                              {note_model.input_names[0]: data_notes,
                               note_model.input_names[1]: data_chosen,
                               note_model.input_names[2]: data_style}, expected, self.model_files))
Example No. 7
def convert(model,
            name=None,
            initial_types=None,
            doc_string='',
            target_opset=None,
            targeted_onnx=onnx.__version__,
            custom_conversion_functions=None,
            custom_shape_calculators=None):
    '''
    This function produces an equivalent ONNX model of the given lightgbm model.
    The supported lightgbm modules are listed below.
    
* `LGBMClassifier <https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html>`_
    * `LGBMRegressor <https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html>`_
    * `Booster <https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html>`_

    :param model: A LightGBM model
    :param initial_types: a python list. Each element is a tuple of a variable name and a type defined in data_types.py
    :param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
    :param doc_string: A string attached onto the produced ONNX model
    :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
    :param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
        produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
    :param custom_conversion_functions: a dictionary for specifying the user customized conversion function
    :param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
    :return: An ONNX model (type: ModelProto) which is equivalent to the input lightgbm model
    '''
    if initial_types is None:
        raise ValueError(
            'Initial types are required. See usage of convert(...) in '
            'onnxmltools.convert.lightgbm.convert for details')
    if isinstance(model, lightgbm.Booster):
        model = WrappedBooster(model)
    if name is None:
        name = str(uuid4().hex)

    target_opset = target_opset if target_opset else get_maximum_opset_supported()
    topology = parse_lightgbm(model, initial_types, target_opset,
                              custom_conversion_functions,
                              custom_shape_calculators)
    topology.compile()
    onnx_model = convert_topology(topology, name, doc_string, target_opset,
                                  targeted_onnx)
    return onnx_model
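
A minimal usage sketch for the LightGBM converter above, with an illustrative toy regressor (numpy and lightgbm imports assumed):

import numpy as np
import lightgbm
from onnxmltools.convert.common.data_types import FloatTensorType

X = np.random.rand(20, 3).astype(np.float32)
y = np.random.rand(20)
reg = lightgbm.LGBMRegressor(n_estimators=3).fit(X, y)
onnx_model = convert(reg, name='lgbm_toy',
                     initial_types=[('input', FloatTensorType([None, 3]))])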
Example No. 8
class TestNameEntityRecognition(unittest.TestCase):

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "Deep speech conversion need opset >= 11.")
    def test_name_entity_recognition(self):
        K.clear_session()
        words_input = Input(shape=(None,), dtype='int32', name='words_input')
        words = Embedding(input_dim=10, output_dim=20,
                          weights=None, trainable=False)(words_input)
        casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
        casing = Embedding(output_dim=20, input_dim=12,
                           weights=None, trainable=False)(casing_input)
        character_input = Input(shape=(None, 52,), name='char_input')
        embed_char_out = TimeDistributed(
            Embedding(26, 20),
            name='char_embedding')(character_input)
        dropout = Dropout(0.5)(embed_char_out)
        conv1d_out = TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same', activation='tanh', strides=1))(
            dropout)
        maxpool_out = TimeDistributed(MaxPooling1D(52))(conv1d_out)
        char = TimeDistributed(Flatten())(maxpool_out)
        char = Dropout(0.5)(char)
        output = concatenate([words, casing, char])
        output = Bidirectional(LSTM(200, return_sequences=True, dropout=0.50, recurrent_dropout=0.25))(output)
        output = TimeDistributed(Dense(35, activation='softmax'))(output)
        keras_model = Model(inputs=[words_input, casing_input, character_input], outputs=[output])
        batch_size = 100
        data1 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
        data2 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
        data3 = np.random.rand(batch_size, 6, 52).astype(np.float32)
        expected = keras_model.predict([data1, data2, data3])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              {keras_model.input_names[0]: data1,
                               keras_model.input_names[1]: data2,
                               keras_model.input_names[2]: data3}, expected, self.model_files))
Example No. 9
class TestSegNet(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "ScatterNd support need opset >= 11.")
    def test_segnet(self):
        K.clear_session()
        keras_model = segnet((128, 128, 3), 80)
        data = np.random.rand(2, 128, 128, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files))
Example No. 10
class TestDeepLabV3(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "DeeplabV3 is not supported for opset < 11.")
    def test_Deeplab_v3(self):
        K.clear_session()
        keras_model = Deeplabv3(weights=None)
        data = np.random.rand(2, 512, 512, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files))
Example No. 11
class TestCRAFT(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "Need Upsample 10+ support.")
    def test_CRAFT(self):
        # input_image = Input(shape=(None, None, 3)) -- Need fixed input shape
        input_image = Input(shape=(512, 512, 3))
        region, affinity = VGG16_UNet(input_tensor=input_image, weights=None)
        keras_model = Model(input_image, [region, affinity], name='vgg16_unet')
        x = np.random.rand(1, 512, 512, 3).astype(np.float32)
        expected = keras_model.predict(x)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))
Example No. 12
def convert(model,
            name=None,
            initial_types=None,
            doc_string='',
            target_opset=None,
            targeted_onnx=onnx.__version__,
            custom_conversion_functions=None,
            custom_shape_calculators=None,
            without_onnx_ml=False,
            zipmap=True):
    '''
    This function produces an equivalent ONNX model of the given lightgbm model.
    The supported lightgbm modules are listed below.

* `LGBMClassifier <https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html>`_
    * `LGBMRegressor <https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html>`_
    * `Booster <https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html>`_

    :param model: A LightGBM model
    :param initial_types: a python list. Each element is a tuple of a variable name and a type defined in data_types.py
    :param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
    :param doc_string: A string attached onto the produced ONNX model
    :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
    :param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
        produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
    :param custom_conversion_functions: a dictionary for specifying the user customized conversion function
    :param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
    :param without_onnx_ml: whether to generate a model composed of ONNX operators only, or to allow the converter
        to use ONNX-ML operators as well
    :param zipmap: remove operator ZipMap from the ONNX graph
    :return: An ONNX model (type: ModelProto) which is equivalent to the input lightgbm model
    '''
    if initial_types is None:
        raise ValueError(
            'Initial types are required. See usage of convert(...) in '
            'onnxmltools.convert.lightgbm.convert for details')
    if without_onnx_ml and not hummingbird_installed():
        raise RuntimeError(
            'Hummingbird is not installed. Please install hummingbird to use this feature: pip install hummingbird-ml'
        )
    if isinstance(model, lightgbm.Booster):
        model = WrappedBooster(model)
    if name is None:
        name = str(uuid4().hex)

    target_opset = target_opset if target_opset else get_maximum_opset_supported()
    topology = parse_lightgbm(model,
                              initial_types,
                              target_opset,
                              custom_conversion_functions,
                              custom_shape_calculators,
                              zipmap=zipmap)
    topology.compile()
    onnx_ml_model = convert_topology(topology, name, doc_string, target_opset,
                                     targeted_onnx)

    if without_onnx_ml:
        from hummingbird.ml import convert, constants
        extra_config = {}
        # extra_config[constants.ONNX_INITIAL_TYPES] = initial_types
        extra_config[constants.ONNX_OUTPUT_MODEL_NAME] = name
        extra_config[constants.ONNX_TARGET_OPSET] = target_opset
        onnx_model = convert(onnx_ml_model, "onnx",
                             extra_config=extra_config).model
        return onnx_model

    return onnx_ml_model
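
With without_onnx_ml=True, the intermediate ONNX-ML model above is handed to Hummingbird and a pure-ONNX model is returned; a hedged sketch, assuming hummingbird-ml is installed and using an illustrative toy classifier:

import numpy as np
import lightgbm
from onnxmltools.convert.common.data_types import FloatTensorType

X = np.random.rand(20, 3).astype(np.float32)
y = (np.random.rand(20) > 0.5).astype(int)
clf = lightgbm.LGBMClassifier(n_estimators=3).fit(X, y)
# Requires: pip install hummingbird-ml
pure_onnx = convert(clf, initial_types=[('input', FloatTensorType([None, 3]))],
                    without_onnx_ml=True)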
Example No. 13
def convert(model,
            name=None,
            initial_types=None,
            doc_string='',
            target_opset=None,
            targeted_onnx=onnx.__version__,
            custom_conversion_functions=None,
            custom_shape_calculators=None,
            spark_session=None):
    '''
    This function produces an equivalent ONNX model of the given spark-ml model. The supported spark-ml
    modules are listed below.

    * Preprocessing and transformations:
      * pyspark.ml.feature.DictVectorizer
      * preprocessing.LabelEncoder
      * preprocessing.OneHotEncoder

    * Linear classification and regression:
      * pyspark.ml.regression.LinearRegression

    * Support vector machine for classification and regression

    * Tree-based models for classification and regression

    * Pipelines:
      * pipeline.Pipeline

    For pipeline conversion, the user needs to make sure each component is one of the supported items listed above.

    This function converts the specified spark-ml model into its ONNX counterpart. Notice that for all conversions,
    initial types are required.  ONNX model name can also be specified.

    :param model: A spark-ml model
    :param initial_types: a python list. Each element is a tuple of a variable name and a type defined in data_types.py
    :param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
    :param doc_string: A string attached onto the produced ONNX model
    :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
    :param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
    produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
    :param custom_conversion_functions: a dictionary for specifying the user customized conversion function
    :param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
    :return: An ONNX model (type: ModelProto) which is equivalent to the input spark-ml model

    Example of initial_types:
    Assume that the specified spark-ml model takes a heterogeneous list as its input. If the first 5 elements are
    floats and the last 10 elements are integers, we need to specify initial types as below. The [1] in [1, 5] indicates
    the batch size here is 1.
    >>> from onnxmltools.convert.common.data_types import FloatTensorType, Int64TensorType
    >>> initial_type = [('float_input', FloatTensorType([1, 5])), ('int64_input', Int64TensorType([1, 10]))]
    '''
    if initial_types is None:
        raise ValueError(
            'Initial types are required. See usage of convert(...) in '
            'onnxmltools.convert.sparkml.convert for details')

    if name is None:
        name = str(uuid4().hex)

    target_opset = target_opset if target_opset else get_maximum_opset_supported()
    # Parse spark-ml model as our internal data structure (i.e., Topology)
    topology = parse_sparkml(spark_session, model, initial_types, target_opset,
                             custom_conversion_functions,
                             custom_shape_calculators)

    # Infer variable shapes
    topology.compile()

    # Convert our Topology object into ONNX. The outcome is an ONNX model.
    onnx_model = convert_topology(topology, name, doc_string, target_opset,
                                  targeted_onnx)

    return onnx_model
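
A minimal usage sketch for the spark-ml converter, using an illustrative two-feature linear regression (pyspark and onnxmltools imports assumed available):

from pyspark.sql import SparkSession
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from onnxmltools.convert.common.data_types import FloatTensorType

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(0.0, 1.0, 2.0), (1.0, 2.0, 3.0)], ['a', 'b', 'label'])
assembled = VectorAssembler(inputCols=['a', 'b'], outputCol='features').transform(df)
lr_model = LinearRegression(featuresCol='features', labelCol='label').fit(assembled)
onnx_model = convert(lr_model, name='sparkml_lr',
                     initial_types=[('features', FloatTensorType([1, 2]))],
                     spark_session=spark)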
Example No. 14
class TestDeepSpeech(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "Deep speech conversion need opset >= 11.")
    def test_deep_speech(self):
        K.clear_session()
        input_dim = 20
        output_dim = 10
        context = 7
        units = 1024
        dropouts = (0.1, 0.1, 0)

        # Define input tensor [batch, time, features]
        input_tensor = layers.Input([None, input_dim], name='X')

        # Add 4th dimension [batch, time, frequency, channel]
        x = layers.Lambda(keras.backend.expand_dims,
                          arguments=dict(axis=-1))(input_tensor)
        # Fill zeros around time dimension
        x = layers.ZeroPadding2D(padding=(context, 0))(x)
        # Convolve signal in time dim
        receptive_field = (2 * context + 1, input_dim)
        x = layers.Conv2D(filters=units, kernel_size=receptive_field)(x)
        # Squeeze into 3rd dim array
        x = layers.Lambda(keras.backend.squeeze, arguments=dict(axis=2))(x)
        # Add non-linearity
        x = layers.ReLU(max_value=20)(x)
        # Use dropout as regularization
        x = layers.Dropout(rate=dropouts[0])(x)

        # The 2nd and 3rd FC layers do feature extraction based on the narrow
        # context of the convolutional layer
        x = layers.TimeDistributed(layers.Dense(units))(x)
        x = layers.ReLU(max_value=20)(x)
        x = layers.Dropout(rate=dropouts[1])(x)

        x = layers.TimeDistributed(layers.Dense(units))(x)
        x = layers.ReLU(max_value=20)(x)
        x = layers.Dropout(rate=dropouts[2])(x)

        # Use recurrent layer to have a broader context
        x = layers.Bidirectional(layers.LSTM(units, return_sequences=True),
                                 merge_mode='sum')(x)

        # Return logits over characters at each time step; computing CTC on
        # logits is more stable than on softmax outputs.
        output_tensor = layers.TimeDistributed(layers.Dense(output_dim))(x)
        model = keras.Model(input_tensor, output_tensor, name='DeepSpeech')
        data = np.random.rand(2, 3, input_dim).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "Deep speech conversion need opset >= 11.")
    def test_deep_speech_2(self):
        K.clear_session()
        input_dim = 20
        output_dim = 10
        rnn_units = 800
        # Define input tensor [batch, time, features]
        input_tensor = layers.Input([None, input_dim], name='X')

        # Add 4th dimension [batch, time, frequency, channel]
        x = layers.Lambda(keras.backend.expand_dims,
                          arguments=dict(axis=-1))(input_tensor)
        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 41],
                          strides=[2, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_1')(x)
        x = layers.BatchNormalization(name='conv_1_bn')(x)
        x = layers.ReLU(name='conv_1_relu')(x)

        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 21],
                          strides=[1, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_2')(x)
        x = layers.BatchNormalization(name='conv_2_bn')(x)
        x = layers.ReLU(name='conv_2_relu')(x)
        # We need to squeeze to a 3D tensor. Thanks to the strides in the frequency
        # domain, the number of features per channel is reduced fourfold.
        x = layers.Reshape([-1, input_dim // 4 * 32])(x)

        for i in [1, 2, 3, 4, 5]:
            recurrent = layers.GRU(units=rnn_units,
                                   activation='tanh',
                                   recurrent_activation='sigmoid',
                                   use_bias=True,
                                   return_sequences=True,
                                   reset_after=True,
                                   name='gru_' + str(i))
            x = layers.Bidirectional(recurrent,
                                     name='bidirectional' + str(i),
                                     merge_mode='concat')(x)
            x = layers.Dropout(rate=0.5)(x) if i < 5 else x  # dropout only between recurrent layers

        # Return logits over characters at each time step; computing CTC on
        # logits is more stable than on softmax outputs.
        x = layers.TimeDistributed(layers.Dense(units=rnn_units * 2),
                                   name='dense_1')(x)
        x = layers.ReLU(name='dense_1_relu')(x)
        x = layers.Dropout(rate=0.5)(x)
        output_tensor = layers.TimeDistributed(layers.Dense(units=output_dim),
                                               name='dense_2')(x)

        model = keras.Model(input_tensor, output_tensor, name='DeepSpeech2')
        data = np.random.rand(2, 3, input_dim).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))
Example No. 15
class TestTransformers(unittest.TestCase):

    text_str = 'The quick brown fox jumps over lazy dog.'

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def _get_token_path(self, file_name):
        return 'https://lotus.blob.core.windows.net/converter-models/transformer_tokenizer/' + file_name

    def _get_tokenzier(self, tokenizer_file):
        token_path = self._get_token_path(tokenizer_file)
        if not os.path.exists(tokenizer_file):
            urllib.request.urlretrieve(token_path, tokenizer_file)
        with open(tokenizer_file, 'rb') as handle:
            tokenizer = pickle.load(handle)
        return tokenizer

    def _prepare_inputs(self, tokenizer, batch_size=3):
        raw_data = json.dumps({
            'text': self.text_str
        })
        text = json.loads(raw_data)['text']
        # The tokenizers were generated with transformers 2.5.0; model_max_length was introduced in 2.9.0 and is required there.
        if not hasattr(tokenizer, 'model_max_length'):
            tokenizer.model_max_length = 1024
        inputs_raw = tokenizer.encode_plus(text, add_special_tokens=True)
        inputs_onnx = {k_: np.repeat(np.expand_dims(v_, axis=0), batch_size, axis=0) for k_, v_ in inputs_raw.items()}
        inputs = {k_: tf.constant(v_) for k_, v_ in inputs_onnx.items()}
        return text, inputs, inputs_onnx

    @unittest.skip("Output shape mismatch for tf model prediction.")
    def test_3layer_gpt2(self):
        from transformers import GPT2Config, TFGPT2Model, BertTokenizer
        keras2onnx.proto.keras.backend.set_learning_phase(0)
        config = GPT2Config(n_layer=3)
        model = TFGPT2Model(config)
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        inputs = tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='tf')
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFBertModel(self):
        from transformers import BertConfig, TFBertModel
        keras.backend.clear_session()
        # pretrained_weights = 'bert-base-uncased'
        tokenizer_file = 'bert_bert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = BertConfig()
        model = TFBertModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    @unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
    def test_TFBertForPreTraining(self):
        from transformers import BertConfig, TFBertForPreTraining
        keras.backend.clear_session()
        # pretrained_weights = 'bert-base-uncased'
        tokenizer_file = 'bert_bert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = BertConfig()
        model = TFBertForPreTraining(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    def test_TFBertForMaskedLM(self):
        from transformers import BertConfig, TFBertForMaskedLM
        keras.backend.clear_session()
        # pretrained_weights = 'bert-base-uncased'
        tokenizer_file = 'bert_bert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = BertConfig()
        model = TFBertForMaskedLM(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    @unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
    def test_TFBertForNextSentencePrediction(self):
        from transformers import BertConfig, TFBertForNextSentencePrediction
        keras.backend.clear_session()
        # pretrained_weights = 'bert-base-uncased'
        tokenizer_file = 'bert_bert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = BertConfig()
        model = TFBertForNextSentencePrediction(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFBertForSequenceClassification(self):
        from transformers import BertConfig, TFBertForSequenceClassification
        keras.backend.clear_session()
        # pretrained_weights = 'bert-base-uncased'
        tokenizer_file = 'bert_bert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = BertConfig()
        model = TFBertForSequenceClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFBertForTokenClassification(self):
        from transformers import BertConfig, TFBertForTokenClassification
        keras.backend.clear_session()
        # pretrained_weights = 'bert-base-uncased'
        tokenizer_file = 'bert_bert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = BertConfig()
        model = TFBertForTokenClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFBertForQuestionAnswering(self):
        from transformers import BertConfig, TFBertForQuestionAnswering
        keras.backend.clear_session()
        # pretrained_weights = 'bert-base-uncased'
        tokenizer_file = 'bert_bert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = BertConfig()
        model = TFBertForQuestionAnswering(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFGPT2(self):
        if enable_full_transformer_test:
            from transformers import GPT2Config, TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
            model_list = [TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel]
        else:
            from transformers import GPT2Config, TFGPT2Model
            model_list = [TFGPT2Model]
        # pretrained_weights = 'gpt2'
        tokenizer_file = 'gpt2_gpt2.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = GPT2Config()
        for model_instance_ in model_list:
            keras.backend.clear_session()
            model = model_instance_(config)
            model._set_inputs(inputs)
            predictions_original = model(inputs)
            predictions = [predictions_original[0]] + list(v_.numpy() for v_ in predictions_original[1])
            onnx_model = keras2onnx.convert_keras(model, model.name)
            self.assertTrue(
                run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                                 atol=1.e-4))

    @unittest.skipIf(get_maximum_opset_supported() < 12, "Einsum is not supported until opset 12.")
    def test_TFXLNet(self):
        if enable_full_transformer_test:
            from transformers import XLNetConfig, TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
                TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple
            model_list = [TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
                TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple]
        else:
            from transformers import XLNetConfig, TFXLNetModel
            model_list = [TFXLNetModel]

        # pretrained_weights = 'xlnet-large-cased'
        tokenizer_file = 'xlnet_xlnet-large-cased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        config = XLNetConfig(n_layer=2)
        # The model with input mask has MatrixDiagV3 which is not a registered function/op
        token = tokenizer.encode(self.text_str, add_special_tokens=True)
        inputs_onnx = {'input_1': np.expand_dims(token, axis=0)}
        inputs = tf.constant(token)[None, :]  # Batch size 1

        for model_instance_ in model_list:
            keras.backend.clear_session()
            model = model_instance_(config)
            predictions = model.predict(inputs)
            onnx_model = keras2onnx.convert_keras(model)
            self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    @unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
    def test_TFOpenAIGPTModel(self):
        from transformers import OpenAIGPTConfig, TFOpenAIGPTModel
        keras.backend.clear_session()
        # pretrained_weights = 'openai-gpt'
        tokenizer_file = 'openai_openai-gpt.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = OpenAIGPTConfig()
        model = TFOpenAIGPTModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFOpenAIGPTLMHeadModel(self):
        from transformers import OpenAIGPTConfig, TFOpenAIGPTLMHeadModel
        keras.backend.clear_session()
        # pretrained_weights = 'openai-gpt'
        tokenizer_file = 'openai_openai-gpt.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = OpenAIGPTConfig()
        model = TFOpenAIGPTLMHeadModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    def test_TFOpenAIGPTDoubleHeadsModel(self):
        from transformers import OpenAIGPTConfig, TFOpenAIGPTDoubleHeadsModel
        keras.backend.clear_session()
        # pretrained_weights = 'openai-gpt'
        tokenizer_file = 'openai_openai-gpt.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        # tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2), batch_dims = 1 in this case
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer, batch_size=1)
        config = OpenAIGPTConfig()
        model = TFOpenAIGPTDoubleHeadsModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    @unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
    def test_TFXLMModel(self):
        from transformers import XLMConfig, TFXLMModel
        keras.backend.clear_session()
        # pretrained_weights = 'xlm-mlm-enfr-1024'
        tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = XLMConfig()
        model = TFXLMModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    @unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
    def test_TFXLMWithLMHeadModel(self):
        from transformers import XLMConfig, TFXLMWithLMHeadModel
        keras.backend.clear_session()
        # pretrained_weights = 'xlm-mlm-enfr-1024'
        tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = XLMConfig()
        model = TFXLMWithLMHeadModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    @unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
    def test_TFXLMForSequenceClassification(self):
        from transformers import XLMConfig, TFXLMForSequenceClassification
        keras.backend.clear_session()
        # pretrained_weights = 'xlm-mlm-enfr-1024'
        tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = XLMConfig()
        model = TFXLMForSequenceClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    @unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
    def test_TFXLMForQuestionAnsweringSimple(self):
        from transformers import XLMConfig, TFXLMForQuestionAnsweringSimple
        keras.backend.clear_session()
        # pretrained_weights = 'xlm-mlm-enfr-1024'
        tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = XLMConfig()
        model = TFXLMForQuestionAnsweringSimple(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFDistilBertModel(self):
        from transformers import DistilBertConfig, TFDistilBertModel
        keras.backend.clear_session()
        # pretrained_weights = 'distilbert-base-uncased'
        tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = DistilBertConfig()
        model = TFDistilBertModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFDistilBertForMaskedLM(self):
        from transformers import DistilBertConfig, TFDistilBertForMaskedLM
        keras.backend.clear_session()
        # pretrained_weights = 'distilbert-base-uncased'
        tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = DistilBertConfig()
        model = TFDistilBertForMaskedLM(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    @unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
    def test_TFDistilBertForSequenceClassification(self):
        from transformers import DistilBertConfig, TFDistilBertForSequenceClassification
        keras.backend.clear_session()
        # pretrained_weights = 'distilbert-base-uncased'
        tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = DistilBertConfig()
        model = TFDistilBertForSequenceClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFDistilBertForTokenClassification(self):
        from transformers import DistilBertConfig, TFDistilBertForTokenClassification
        keras.backend.clear_session()
        # pretrained_weights = 'distilbert-base-uncased'
        tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = DistilBertConfig()
        model = TFDistilBertForTokenClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFDistilBertForQuestionAnswering(self):
        from transformers import DistilBertConfig, TFDistilBertForQuestionAnswering
        keras.backend.clear_session()
        # pretrained_weights = 'distilbert-base-uncased'
        tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = DistilBertConfig()
        model = TFDistilBertForQuestionAnswering(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    @unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
    def test_TFRobertaModel(self):
        from transformers import RobertaConfig, TFRobertaModel
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaModel(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    def test_TFRobertaForMaskedLM(self):
        from transformers import RobertaConfig, TFRobertaForMaskedLM
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaForMaskedLM(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))

    def test_TFRobertaForSequenceClassification(self):
        from transformers import RobertaConfig, TFRobertaForSequenceClassification
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaForSequenceClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))

    @unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
    def test_TFRobertaForTokenClassification(self):
        from transformers import RobertaConfig, TFRobertaForTokenClassification
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaForTokenClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Example No. 16
class TestMLSTM_FCN(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11,
                     "Skipped at test level 0; requires opset >= 11.")
    def test_MLSTM_FCN(self):
        K.clear_session()
        ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

        x = Masking()(ip)
        x = LSTM(8)(x)
        x = Dropout(0.8)(x)

        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        x = concatenate([x, y])
        out = Dense(NB_CLASS, activation='softmax')(x)
        keras_model = Model(ip, out)
        data = np.random.rand(2, MAX_NB_VARIABLES,
                              MAX_TIMESTEPS).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model,
                                                   keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files))

    @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11,
                     "Skipped at test level 0; requires opset >= 11.")
    def test_LSTM_FCN(self):
        K.clear_session()
        ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

        x = Masking()(ip)
        x = LSTM(8)(x)
        x = Dropout(0.8)(x)

        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        x = concatenate([x, y])

        out = Dense(NB_CLASS, activation='softmax')(x)

        keras_model = Model(ip, out)
        data = np.random.rand(2, MAX_NB_VARIABLES,
                              MAX_TIMESTEPS).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model,
                                                   keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files))
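
# The tests above call a squeeze_excite_block helper defined elsewhere in the
# MLSTM-FCN repository. A minimal sketch of that block, a hedged reconstruction
# of the standard squeeze-and-excitation pattern rather than the repository's
# exact code:
def squeeze_excite_block(input_tensor, ratio=16):
    filters = K.int_shape(input_tensor)[-1]           # channels-last Conv1D output
    se = GlobalAveragePooling1D()(input_tensor)       # squeeze: (batch, filters)
    se = Reshape((1, filters))(se)
    se = Dense(filters // ratio, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)
    return keras.layers.multiply([input_tensor, se])  # excite: rescale each channel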
Exemplo n.º 17
class TestKerasApplications(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_MobileNet(self):
        mobilenet = keras.applications.mobilenet
        model = mobilenet.MobileNet(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "There is no mobilenet_v2 module before keras 2.2.3.")
    def test_MobileNetV2(self):
        mobilenet_v2 = keras.applications.mobilenet_v2
        model = mobilenet_v2.MobileNetV2(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_ResNet50(self):
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_InceptionV3(self):
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path, target_size=299)
        self.assertTrue(*res)

    def test_DenseNet121(self):
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_Xception(self):
        from keras.applications.xception import Xception
        model = Xception(include_top=True, weights='imagenet')
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=299)
        self.assertTrue(*res)

    def test_SmileCNN(self):
        # From https://github.com/kylemcdonald/SmileCNN/blob/master/2%20Training.ipynb
        nb_filters = 32
        nb_pool = 2
        nb_conv = 3
        nb_classes = 2

        model = Sequential()

        model.add(
            Conv2D(nb_filters, (nb_conv, nb_conv),
                   activation='relu',
                   input_shape=(32, 32, 3)))
        model.add(Conv2D(nb_filters, (nb_conv, nb_conv), activation='relu'))
        model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes, activation='softmax'))
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=32)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.4"),
                     "keras-resnet requires keras 2.2.4 or later.")
    def test_keras_resnet_batchnormalization(self):
        N, C, H, W = 2, 3, 120, 120
        import keras_resnet

        model = Sequential()
        model.add(
            ZeroPadding2D(padding=((3, 3), (3, 3)),
                          input_shape=(H, W, C),
                          data_format='channels_last'))
        model.add(
            Conv2D(64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   padding='valid',
                   dilation_rate=(1, 1),
                   use_bias=False,
                   data_format='channels_last'))
        model.add(keras_resnet.layers.BatchNormalization(freeze=True, axis=3))

        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(N, H, W, C).astype(np.float32).reshape(
            (N, H, W, C))
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # model from https://github.com/titu1994/Image-Super-Resolution
    def test_ExpantionSuperResolution(self):
        init = Input(shape=(32, 32, 3))
        x = Convolution2D(64, (9, 9),
                          activation='relu',
                          padding='same',
                          name='level1')(init)
        x1 = Convolution2D(32, (1, 1),
                           activation='relu',
                           padding='same',
                           name='lavel1_1')(x)
        x2 = Convolution2D(32, (3, 3),
                           activation='relu',
                           padding='same',
                           name='lavel1_2')(x)
        x3 = Convolution2D(32, (5, 5),
                           activation='relu',
                           padding='same',
                           name='lavel1_3')(x)
        x = Average()([x1, x2, x3])
        out = Convolution2D(3, (5, 5),
                            activation='relu',
                            padding='same',
                            name='output')(x)
        model = keras.models.Model(init, out)
        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=32)
        self.assertTrue(*res)

    def test_tcn(self):
        from tcn import TCN
        batch_size, timesteps, input_dim = None, 20, 1
        actual_batch_size = 3
        i = Input(batch_shape=(batch_size, timesteps, input_dim))
        np.random.seed(1000)  # fix the random seed to avoid output discrepancies
        for return_sequences in [True, False]:
            o = TCN(return_sequences=return_sequences)(i)  # The TCN layers are here.
            o = Dense(1)(o)
            model = keras.models.Model(inputs=[i], outputs=[o])
            onnx_model = keras2onnx.convert_keras(model, model.name)
            data = np.random.rand(actual_batch_size, timesteps,
                                  input_dim).astype(np.float32).reshape(
                                      (actual_batch_size, timesteps,
                                       input_dim))
            expected = model.predict(data)
            self.assertTrue(
                run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                                  data, expected, self.model_files))

    # model from https://github.com/titu1994/LSTM-FCN
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_lstm_fcn(self):
        MAX_SEQUENCE_LENGTH = 176
        NUM_CELLS = 8
        NB_CLASS = 37
        ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

        x = LSTM(NUM_CELLS)(ip)
        x = Dropout(0.8)(x)

        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        x = concatenate([x, y])

        out = Dense(NB_CLASS, activation='softmax')(x)

        model = Model(ip, out)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        batch_size = 2
        data = np.random.rand(batch_size, 1,
                              MAX_SEQUENCE_LENGTH).astype(np.float32).reshape(
                                  batch_size, 1, MAX_SEQUENCE_LENGTH)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # model from https://github.com/CyberZHG/keras-self-attention
    @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11,
                     "Test level 0 only, or opset < 11.")
    def test_keras_self_attention(self):
        from keras_self_attention import SeqSelfAttention
        keras.backend.clear_session()

        model = keras.models.Sequential()
        model.add(
            keras.layers.Embedding(input_dim=10000,
                                   output_dim=300,
                                   mask_zero=True))
        model.add(
            keras.layers.Bidirectional(
                keras.layers.LSTM(units=128, return_sequences=True)))
        model.add(SeqSelfAttention(attention_activation='sigmoid'))
        model.add(keras.layers.Dense(units=5))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(5, 10).astype(np.float32).reshape(5, 10)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # Model from https://github.com/chandrikadeb7/Face-Mask-Detection
    @unittest.skipIf(test_level_0 or is_keras_older_than("2.2.3"),
                     "Test level 0 only, or no mobilenet_v2 module before keras 2.2.3.")
    def test_FaceMaskDetection(self):
        mobilenet_v2 = keras.applications.mobilenet_v2
        baseModel = mobilenet_v2.MobileNetV2(weights=None,
                                             include_top=False,
                                             input_tensor=Input(shape=(224,
                                                                       224,
                                                                       3)))
        headModel = baseModel.output
        headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(128, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(2, activation="softmax")(headModel)

        model = Model(inputs=baseModel.input, outputs=headModel)
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    # Model from https://github.com/abhishekrana/DeepFashion
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_DeepFashion(self):
        base_model = keras.applications.VGG16(weights=None,
                                              include_top=False,
                                              input_shape=(224, 224, 3))
        model_inputs = base_model.input
        common_inputs = base_model.output
        dropout_rate = 0.5
        output_classes = 20
        x = Flatten()(common_inputs)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        predictions_class = Dense(output_classes,
                                  activation='softmax',
                                  name='predictions_class')(x)

        ## Model (Regression) IOU score
        x = Flatten()(common_inputs)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        predictions_iou = Dense(1,
                                activation='sigmoid',
                                name='predictions_iou')(x)

        ## Create Model
        keras_model = Model(inputs=model_inputs,
                            outputs=[predictions_class, predictions_iou])
        res = run_image(keras_model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=224,
                        compare_perf=True)
        self.assertTrue(*res)

    # Model from https://github.com/manicman1999/Keras-BiGAN
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_bigan_generator(self):
        def g_block(inp, fil, u=True):

            if u:
                out = UpSampling2D(interpolation='bilinear')(inp)
            else:
                out = Activation('linear')(inp)

            skip = Conv2D(fil,
                          1,
                          padding='same',
                          kernel_initializer='he_normal')(out)

            out = Conv2D(filters=fil,
                         kernel_size=3,
                         padding='same',
                         kernel_initializer='he_normal')(out)
            out = LeakyReLU(0.2)(out)

            out = Conv2D(filters=fil,
                         kernel_size=3,
                         padding='same',
                         kernel_initializer='he_normal')(out)
            out = LeakyReLU(0.2)(out)

            out = Conv2D(fil,
                         1,
                         padding='same',
                         kernel_initializer='he_normal')(out)

            out = keras.layers.add([out, skip])
            out = LeakyReLU(0.2)(out)

            return out

        latent_size = 64
        cha = 16

        inp = Input(shape=[latent_size])

        x = Dense(4 * 4 * 16 * cha, kernel_initializer='he_normal')(inp)
        x = Reshape([4, 4, 16 * cha])(x)

        x = g_block(x, 16 * cha, u=False)  # 4x4
        x = g_block(x, 8 * cha)  # 8x8
        x = g_block(x, 4 * cha)  # 16x16
        x = g_block(x, 3 * cha)  # 32x32
        x = g_block(x, 2 * cha)  # 64x64
        x = g_block(x, 1 * cha)  # 128x128

        x = Conv2D(filters=3,
                   kernel_size=1,
                   activation='sigmoid',
                   padding='same',
                   kernel_initializer='he_normal')(x)

        model = Model(inputs=inp, outputs=x)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(200, latent_size).astype(np.float32).reshape(
            200, latent_size)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # Model from https://github.com/ankur219/ECG-Arrhythmia-classification
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_ecg_classification(self):
        model = Sequential()
        model.add(
            Conv2D(64, (3, 3),
                   strides=(1, 1),
                   input_shape=[128, 128, 3],
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(
            Conv2D(64, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(
            Conv2D(128, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(
            Conv2D(128, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(
            Conv2D(256, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(
            Conv2D(256, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(2048))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(7, activation='softmax'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(2, 128, 128, 3).astype(np.float32)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))

    # Model from https://github.com/arunponnusamy/gender-detection-keras
    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_gender_detection(self):
        model = Sequential()
        inputShape = (224, 224, 3)
        chanDim = -1
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        model.add(Dense(80))
        model.add(Activation("sigmoid"))

        res = run_image(model,
                        self.model_files,
                        img_path,
                        atol=5e-3,
                        target_size=224)
        self.assertTrue(*res)
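
# run_image and run_keras_and_ort are helpers from the keras2onnx test
# utilities. A hedged sketch of the round trip run_keras_and_ort performs;
# the real helper also handles multiple inputs/outputs and tolerance options,
# and run_image additionally loads and preprocesses the test image.
import onnxruntime as _ort

def run_keras_and_ort_sketch(case_name, onnx_model, keras_model, data, expected,
                             model_files, atol=1e-5):
    # keras_model is kept for signature parity; `expected` already holds its output.
    temp_path = case_name + '.onnx'
    with open(temp_path, 'wb') as f:
        f.write(onnx_model.SerializeToString())        # persist the converted model
    model_files.append(temp_path)                      # registered for tearDown cleanup
    sess = _ort.InferenceSession(temp_path)
    feed = {sess.get_inputs()[0].name: data}
    actual = sess.run(None, feed)                      # ONNX Runtime prediction
    return np.allclose(expected, actual[0], atol=atol)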
Exemplo n.º 18
from onnxconverter_common.onnx_fx import GraphFunctionType as _Ty
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from onnxconverter_common.optimizer import optimize_onnx_model


def _ort_inference(mdl, inputs):
    sess = _ort.InferenceSession(mdl.SerializeToString())
    return sess.run(None, inputs)


Graph.inference_runtime = _ort_inference
Graph.opset = 9
onnx_function = Graph.trace


@unittest.skipIf(get_maximum_opset_supported() < 9, "onnx_fx only supports ONNX opset 9 and greater")
class ONNXFunctionTest(unittest.TestCase):
    # this works, and the exported graph is usable:
    def test_core(self):
        @onnx_function
        def f(x, y):
            return x + y

        @onnx_function
        def g(x, y):
            return x.ox.abs(f(x, y) + 1.0)

        self.assertTrue(
            np.allclose(g([2.0], [-5.0]), np.array([2.0])))

    def test_loop(self):
Exemplo n.º 19
class TestUnetPlusPlus(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 14,
                     "Need ConvTranspose-14 support.")
    def test_unet_plus_plus(self):
        backbone_name = 'vgg16'
        input_shape = (None, None, 3)
        input_tensor = None
        encoder_weights = None  #'imagenet'

        backbone = VGG16(input_shape=input_shape,
                         input_tensor=input_tensor,
                         weights=encoder_weights,
                         include_top=False)

        input = backbone.input
        x = backbone.output
        block_type = 'transpose'

        if block_type == 'transpose':
            up_block = Transpose2D_block
        else:
            up_block = Upsample2D_block

        skip_connection_layers = ('block5_conv3', 'block4_conv3',
                                  'block3_conv3', 'block2_conv2',
                                  'block1_conv2')

        # convert layer names to indices
        skip_connection_idx = ([
            get_layer_number(backbone, l) if isinstance(l, str) else l
            for l in skip_connection_layers
        ])

        n_upsample_blocks = 5
        upsample_rates = (2, 2, 2, 2, 2)
        decoder_filters = (256, 128, 64, 32, 16)
        block_type = 'upsampling'
        activation = 'sigmoid'
        use_batchnorm = True
        classes = 1

        for i in range(n_upsample_blocks):

            # check if there is a skip connection
            skip_connection = None
            if i < len(skip_connection_idx):
                skip_connection = backbone.layers[
                    skip_connection_idx[i]].output

            upsample_rate = to_tuple(upsample_rates[i])

            x = up_block(decoder_filters[i],
                         i,
                         upsample_rate=upsample_rate,
                         skip=skip_connection,
                         use_batchnorm=use_batchnorm)(x)

        x = Conv2D(classes, (3, 3), padding='same', name='final_conv')(x)
        x = Activation(activation, name=activation)(x)

        model = Model(input, x)
        res = run_image(model,
                        self.model_files,
                        img_path,
                        target_size=(256, 256, 3))
        self.assertTrue(*res)
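
# The UNet++ test above borrows several helpers from the segmentation_models
# package. Hedged sketches of the two small utilities it uses; the decoder
# blocks Transpose2D_block and Upsample2D_block are more involved and omitted.
def to_tuple(x):
    # Expand a scalar upsample rate such as 2 into (2, 2); pass tuples through.
    return x if isinstance(x, tuple) else (x, x)


def get_layer_number(model, layer_name):
    # Map a layer name such as 'block5_conv3' to its index in model.layers.
    for i, layer in enumerate(model.layers):
        if layer.name == layer_name:
            return i
    raise ValueError('No layer named %s in model %s' % (layer_name, model.name))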
Exemplo n.º 20
def convert(model, name=None, initial_types=None, doc_string='', target_opset=None,
            targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
    '''
    This function converts the specified CoreML model into its ONNX counterpart. Some information such as the produced
    ONNX model name can be specified.
    
    :param model: A `CoreML model <https://apple.github.io/coremltools/coremlspecification/sections/Model.html#model>`_ or
        a CoreML MLModel object
    :param initial_types: A list providing some types for some root variables. Each element is a tuple of a variable
        name and a type defined in *data_types.py*.
    :param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
    :param doc_string: A string attached onto the produced ONNX model
    :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
    :param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
        produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
    :param custom_conversion_functions: a dictionary for specifying the user customized conversion function
    :param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
    :return: An ONNX model (type: ModelProto) which is equivalent to the input CoreML model

    Example of initial types:
    Assume that 'A' and 'B' are two root variable names used in the CoreML 
    model you want to convert. We can specify their types via:
    
    ::
    
        from onnxmltools.convert.common.data_types import FloatTensorType
        initial_type = [('A', FloatTensorType([40, 12, 1, 1])), 
                        ('B', FloatTensorType([1, 32, 1, 1]))]
    '''
    if isinstance(model, coremltools.models.MLModel):
        spec = model.get_spec()
    else:
        spec = model

    if name is None:
        name = str(uuid4().hex)

    target_opset = target_opset if target_opset else get_maximum_opset_supported()
    # Parse CoreML model as our internal data structure (i.e., Topology)
    topology = parse_coreml(spec, initial_types, target_opset, custom_conversion_functions, custom_shape_calculators)

    # Parse the CoreML description, author, and license. That information will be attached to the final ONNX model.
    metadata = spec.description.metadata
    metadata_props = []
    if metadata:
        if not doc_string and metadata.shortDescription:
            doc_string = metadata.shortDescription  # If doc_string is not specified, we use description from CoreML
        if metadata.author:
            entry = onnx_proto.StringStringEntryProto()
            entry.key = 'author'
            entry.value = metadata.author
            metadata_props.append(entry)
        if metadata.license:
            entry = onnx_proto.StringStringEntryProto()
            entry.key = 'license'
            entry.value = metadata.license
            metadata_props.append(entry)

    # Convert our Topology object into ONNX. The outcome is an ONNX model.
    onnx_model = convert_topology(topology, name, doc_string, target_opset, targeted_onnx)

    # Edit ONNX model's attributes related to CoreML's meta information
    if len(metadata_props) > 0:
        onnx_model.metadata_props.extend(metadata_props)

    return onnx_model
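
# A hedged usage sketch for the converter above; the .mlmodel path, graph name,
# and tensor shapes are placeholders, not values taken from this document.
import coremltools
from onnxmltools.convert.common.data_types import FloatTensorType
from onnxmltools.utils import save_model

coreml_model = coremltools.models.MLModel('example.mlmodel')  # hypothetical path
initial_types = [('A', FloatTensorType([40, 12, 1, 1])),
                 ('B', FloatTensorType([1, 32, 1, 1]))]
onnx_model = convert(coreml_model, name='coreml_example', initial_types=initial_types)
save_model(onnx_model, 'example.onnx')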
Exemplo n.º 21
def convert(model,
            name=None,
            initial_types=None,
            doc_string='',
            target_opset=None,
            targeted_onnx=onnx.__version__,
            custom_conversion_functions=None,
            custom_shape_calculators=None):
    '''
    This function produces an equivalent ONNX model of the given H2O MOJO model.
    Supported model types:
    - GBM, with limitations:
        - poisson, gamma, tweedie distributions not supported
        - multinomial distribution supported with 3 or more classes (use binomial otherwise)
    Other limitations:
    - models with categorical splits not supported


    :param model: H2O MOJO model loaded into memory (see below for example)
    :param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
    :param initial_types: a python list. Each element is a tuple of a variable name and a type defined in data_types.py
    :param doc_string: A string attached onto the produced ONNX model
    :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
    :param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
        produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
    :param custom_conversion_functions: a dictionary for specifying the user customized conversion function
    :param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
    :return: An ONNX model (type: ModelProto) which is equivalent to the input H2O MOJO model

    :examples:

    >>> from onnxmltools.convert import convert_h2o
    >>> file = open("/path/to/h2o_mojo.zip", "rb")
    >>> mojo_content = file.read()
    >>> file.close()
    >>> h2o_onnx_model = convert_h2o(mojo_content)
    '''
    if name is None:
        name = str(uuid4().hex)
    if initial_types is None:
        initial_types = [('input', FloatTensorType(shape=['None', 'None']))]

    if isinstance(model, str):
        model_path = model
    else:
        _, model_path = tempfile.mkstemp()
        with open(model_path, "wb") as f:
            f.write(model)
    mojo_str = h2o.print_mojo(model_path, format="json")
    mojo_model = json.loads(mojo_str)
    if mojo_model["params"]["algo"] != "gbm":
        raise ValueError(
            "Model type not supported (algo=%s). Only GBM Mojo supported for now."
            % mojo_model["params"]["algo"])

    target_opset = target_opset if target_opset else get_maximum_opset_supported()
    topology = parse_h2o(mojo_model, initial_types, target_opset,
                         custom_conversion_functions, custom_shape_calculators)
    topology.compile()
    onnx_model = convert_topology(topology, name, doc_string, target_opset,
                                  targeted_onnx)
    return onnx_model
Exemplo n.º 22
class TestNLP(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_addition_rnn(self):
        # An implementation of sequence to sequence learning for performing addition
        # from https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py
        DIGITS = 3
        MAXLEN = DIGITS + 1 + DIGITS
        HIDDEN_SIZE = 128
        BATCH_SIZE = 128
        CHARS_LENGTH = 12

        for RNN in [
                keras.layers.LSTM, keras.layers.GRU, keras.layers.SimpleRNN
        ]:
            model = keras.models.Sequential()
            model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, CHARS_LENGTH)))
            model.add(keras.layers.RepeatVector(DIGITS + 1))
            model.add(RNN(HIDDEN_SIZE, return_sequences=True))
            model.add(
                keras.layers.TimeDistributed(
                    keras.layers.Dense(CHARS_LENGTH, activation='softmax')))
            onnx_model = keras2onnx.convert_keras(model, model.name)
            x = np.random.rand(BATCH_SIZE, MAXLEN,
                               CHARS_LENGTH).astype(np.float32)
            expected = model.predict(x)
            self.assertTrue(
                run_onnx_runtime(onnx_model.graph.name, onnx_model, x,
                                 expected, self.model_files))

    def test_babi_rnn(self):
        # Trains two recurrent neural networks based upon a story and a question.
        # from https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py
        RNN = keras.layers.recurrent.LSTM
        EMBED_HIDDEN_SIZE = 50
        SENT_HIDDEN_SIZE = 100
        QUERY_HIDDEN_SIZE = 100
        BATCH_SIZE = 32
        story_maxlen = 15
        vocab_size = 27
        query_maxlen = 17

        sentence = Input(shape=(story_maxlen, ), dtype='int32')
        encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
        encoded_sentence = RNN(SENT_HIDDEN_SIZE)(encoded_sentence)

        question = Input(shape=(query_maxlen, ), dtype='int32')
        encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
        encoded_question = RNN(QUERY_HIDDEN_SIZE)(encoded_question)

        merged = concatenate([encoded_sentence, encoded_question])
        preds = Dense(vocab_size, activation='softmax')(merged)

        model = Model([sentence, question], preds)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.randint(5, 10,
                              size=(BATCH_SIZE, story_maxlen)).astype(np.int32)
        y = np.random.randint(5, 10,
                              size=(BATCH_SIZE, query_maxlen)).astype(np.int32)
        expected = model.predict([x, y])
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, {
                model.input_names[0]: x,
                model.input_names[1]: y
            }, expected, self.model_files))

    @unittest.skipIf(get_maximum_opset_supported() < 9,
                     "LSTM with a dynamic (None) seq_length is not supported before opset 9.")
    def test_imdb_bidirectional_lstm(self):
        # A Bidirectional LSTM on the IMDB sentiment classification task.
        # from https://github.com/keras-team/keras/blob/master/examples/imdb_bidirectional_lstm.py
        max_features = 20000
        maxlen = 100
        batch_size = 32
        model = Sequential()
        model.add(Embedding(max_features, 128, input_length=maxlen))
        model.add(Bidirectional(LSTM(64)))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))

    def test_imdb_cnn_lstm(self):
        # A recurrent convolutional network on the IMDB sentiment classification task.
        # from https://github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py
        max_features = 20000
        maxlen = 100
        embedding_size = 128
        kernel_size = 5
        filters = 64
        pool_size = 4
        lstm_output_size = 70
        batch_size = 30

        model = Sequential()
        model.add(Embedding(max_features, embedding_size, input_length=maxlen))
        model.add(Dropout(0.25))
        model.add(
            Conv1D(filters,
                   kernel_size,
                   padding='valid',
                   activation='relu',
                   strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(LSTM(lstm_output_size))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))

    @unittest.skipIf(get_maximum_opset_supported() < 9,
                     "LSTM with a dynamic (None) seq_length is not supported before opset 9.")
    def test_imdb_lstm(self):
        # An LSTM model on the IMDB sentiment classification task.
        # from https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py
        max_features = 20000
        maxlen = 80
        batch_size = 32
        model = Sequential()
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))

    def test_lstm_text_generation(self):
        # Generate text from Nietzsche's writings.
        # from https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py
        maxlen = 40
        chars_len = 20
        batch_size = 32
        model = Sequential()
        model.add(LSTM(128, input_shape=(maxlen, chars_len)))
        model.add(Dense(chars_len, activation='softmax'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen, chars_len).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))

    def test_reuters_mlp(self):
        # An MLP on the Reuters newswire topic classification task.
        # from https://github.com/keras-team/keras/blob/master/examples/reuters_mlp.py
        max_words = 1000
        batch_size = 32
        num_classes = 20
        model = Sequential()
        model.add(Dense(512, input_shape=(max_words, )))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, max_words).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))

if __name__ == "__main__":
    unittest.main()
Exemplo n.º 23
from onnxconverter_common.float16 import convert_float_to_float16


def _ort_inference(mdl, inputs):
    sess = _ort.InferenceSession(mdl.SerializeToString())
    return sess.run(None, inputs)


Graph.inference_runtime = _ort_inference
Graph.opset = 9
onnx_function = Graph.trace


@unittest.skipIf(_ort.__version__ == '1.8.0',
                 "see https://github.com/microsoft/onnxruntime/issues/7981")
@unittest.skipIf(get_maximum_opset_supported() < 9,
                 "tests designed for ONNX opset 9 and greater")
class ONNXFloat16Test(unittest.TestCase):
    def test_float16(self):
        @onnx_function(outputs=['z'],
                       input_types=(_Ty.F([1, 1, 6, 1])),
                       output_types=[_Ty.f])
        def transpose_n_matmul(x):
            ox = x.ox  # type: OnnxOperatorBuilderX
            wm = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                           12]).astype(np.float32).reshape([2, 6])
            b = ox.constant(value=wm)
            a = ox.transpose(x, perm=[0, 1, 3, 2])
            c = ox.transpose(b, perm=[1, 0])
            return ox.matmul([a, c])
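
# The snippet above is truncated before the actual float16 assertion. A hedged
# sketch of the kind of check such a test performs: run a float32 ModelProto,
# convert it with convert_float_to_float16, and compare the two outputs. The
# model path and input name are placeholders, not taken from this document.
import onnx

def float16_roundtrip_sketch(model_path, input_name, data):
    model32 = onnx.load(model_path)
    expected = _ort_inference(model32, {input_name: data})
    model16 = convert_float_to_float16(model32)       # casts float32 tensors to float16
    actual = _ort_inference(model16, {input_name: data.astype(np.float16)})
    return np.allclose(expected[0], actual[0], atol=1e-2)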
Exemplo n.º 24
class TestUnet(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_unet_1(self):
        # From https://github.com/divamgupta/image-segmentation-keras/models/unet.py
        model = keras_segmentation.models.unet.unet(101)
        res = run_image(model,
                        self.model_files,
                        img_path,
                        target_size=(416, 608))
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "Cannot import normalize_data_format from keras.backend")
    def test_unet_2(self):
        # From https://github.com/jocicmarko/ultrasound-nerve-segmentation
        img_rows = 96
        img_cols = 96

        inputs = Input((img_rows, img_cols, 1))
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        up6 = concatenate([
            Conv2DTranspose(256, (2, 2), strides=(2, 2),
                            padding='same')(conv5), conv4
        ],
                          axis=3)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

        up7 = concatenate([
            Conv2DTranspose(128, (2, 2), strides=(2, 2),
                            padding='same')(conv6), conv3
        ],
                          axis=3)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

        up8 = concatenate([
            Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7),
            conv2
        ],
                          axis=3)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

        up9 = concatenate([
            Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8),
            conv1
        ],
                          axis=3)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

        conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

        model = Model(inputs=[inputs], outputs=[conv10])
        res = run_image(model,
                        self.model_files,
                        img_path,
                        color_mode="grayscale",
                        target_size=(img_rows, img_cols))
        self.assertTrue(*res)

    @unittest.skipIf(get_maximum_opset_supported() < 14,
                     "Need ConvTranspose-14 support.")
    def test_unet_3(self):
        # From https://github.com/yu4u/noise2noise/blob/master/model.py
        model = get_unet_model(out_ch=3, upconv=False)
        res = run_image(model,
                        self.model_files,
                        img_path,
                        target_size=(256, 256, 3))
        self.assertTrue(*res)
Exemplo n.º 25
0
def get_maximum_opset_supported():
    from . import __max_supported_opset__
    return min(__max_supported_opset__, onnx_ex.get_maximum_opset_supported())
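
# Hedged usage sketch: clamp a requested opset to what both this package and
# the installed onnx build support before converting. The value 13 is only an
# illustration, not taken from this document.
requested_opset = 13
effective_opset = min(requested_opset, get_maximum_opset_supported())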
Exemplo n.º 26
class TestCRNN(unittest.TestCase):
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "CRNN conversion needs opset >= 10.")
    def test_CRNN(self):
        img_w = 128
        img_h = 64
        input_shape = (img_w, img_h, 1)  # (128, 64, 1)
        num_classes = 80

        # Make Network
        inputs = Input(name='the_input', shape=input_shape,
                       dtype='float32')  # (None, 128, 64, 1)

        # Convolution layer (VGG)
        inner = Conv2D(64, (3, 3),
                       padding='same',
                       name='conv1',
                       kernel_initializer='he_normal')(
                           inputs)  # (None, 128, 64, 64)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2),
                             name='max1')(inner)  # (None, 64, 32, 64)

        inner = Conv2D(128, (3, 3),
                       padding='same',
                       name='conv2',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 64, 32, 128)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2),
                             name='max2')(inner)  # (None, 32, 16, 128)

        inner = Conv2D(256, (3, 3),
                       padding='same',
                       name='conv3',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(256, (3, 3),
                       padding='same',
                       name='conv4',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2),
                             name='max3')(inner)  # (None, 32, 8, 256)

        inner = Conv2D(512, (3, 3),
                       padding='same',
                       name='conv5',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(512, (3, 3), padding='same',
                       name='conv6')(inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2),
                             name='max4')(inner)  # (None, 32, 4, 512)

        inner = Conv2D(512, (2, 2),
                       padding='same',
                       kernel_initializer='he_normal',
                       name='con7')(inner)  # (None, 32, 4, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)

        # CNN to RNN
        inner = Reshape(target_shape=(32, 2048),
                        name='reshape')(inner)  # (None, 32, 2048)
        inner = Dense(64,
                      activation='relu',
                      kernel_initializer='he_normal',
                      name='dense1')(inner)  # (None, 32, 64)

        # RNN layer
        lstm_1 = LSTM(256,
                      return_sequences=True,
                      kernel_initializer='he_normal',
                      name='lstm1')(inner)  # (None, 32, 256)
        lstm_1b = LSTM(256,
                       return_sequences=True,
                       go_backwards=True,
                       kernel_initializer='he_normal',
                       name='lstm1_b')(inner)
        reversed_lstm_1b = Lambda(
            lambda inputTensor: K.reverse(inputTensor, axes=1))(lstm_1b)

        lstm1_merged = add([lstm_1, reversed_lstm_1b])  # (None, 32, 256)
        lstm1_merged = BatchNormalization()(lstm1_merged)

        lstm_2 = LSTM(256,
                      return_sequences=True,
                      kernel_initializer='he_normal',
                      name='lstm2')(lstm1_merged)
        lstm_2b = LSTM(256,
                       return_sequences=True,
                       go_backwards=True,
                       kernel_initializer='he_normal',
                       name='lstm2_b')(lstm1_merged)
        reversed_lstm_2b = Lambda(
            lambda inputTensor: K.reverse(inputTensor, axes=1))(lstm_2b)

        lstm2_merged = concatenate([lstm_2,
                                    reversed_lstm_2b])  # (None, 32, 512)
        lstm2_merged = BatchNormalization()(lstm2_merged)

        # transforms RNN output to character activations:
        inner = Dense(num_classes,
                      kernel_initializer='he_normal',
                      name='dense2')(lstm2_merged)  # (None, 32, 80)
        y_pred = Activation('softmax', name='softmax')(inner)

        model = Model(inputs=[inputs], outputs=y_pred)

        data = np.random.rand(1, 128, 64, 1).astype(np.float32)
        expected = model.predict(data)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected,
                             self.model_files))

    @unittest.skipIf(test_level_0, "Test level 0 only.")
    def test_CRNN_GRU(self):
        img_w = 128
        img_h = 64
        num_classes = 80
        input_shape = (img_w, img_h, 1)  # (128, 64, 1)

        # Make Network
        inputs = Input(name='the_input', shape=input_shape,
                       dtype='float32')  # (None, 128, 64, 1)

        # Convolution layer (VGG)
        inner = Conv2D(64, (3, 3),
                       padding='same',
                       name='conv1',
                       kernel_initializer='he_normal')(
                           inputs)  # (None, 128, 64, 64)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2),
                             name='max1')(inner)  # (None, 64, 32, 64)

        inner = Conv2D(128, (3, 3),
                       padding='same',
                       name='conv2',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 64, 32, 128)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2),
                             name='max2')(inner)  # (None, 32, 16, 128)

        inner = Conv2D(256, (3, 3),
                       padding='same',
                       name='conv3',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(256, (3, 3),
                       padding='same',
                       name='conv4',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2),
                             name='max3')(inner)  # (None, 32, 8, 256)

        inner = Conv2D(512, (3, 3),
                       padding='same',
                       name='conv5',
                       kernel_initializer='he_normal')(
                           inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(512, (3, 3), padding='same',
                       name='conv6')(inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2),
                             name='max4')(inner)  # (None, 32, 4, 512)

        inner = Conv2D(512, (2, 2),
                       padding='same',
                       kernel_initializer='he_normal',
                       name='con7')(inner)  # (None, 32, 4, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)

        # CNN to RNN
        inner = Reshape(target_shape=(32, 2048),
                        name='reshape')(inner)  # (None, 32, 2048)
        inner = Dense(64,
                      activation='relu',
                      kernel_initializer='he_normal',
                      name='dense1')(inner)  # (None, 32, 64)

        # RNN layer
        gru_1 = GRU(256,
                    return_sequences=True,
                    kernel_initializer='he_normal',
                    name='gru1')(inner)  # (None, 32, 256)
        gru_1b = GRU(256,
                     return_sequences=True,
                     go_backwards=True,
                     kernel_initializer='he_normal',
                     name='gru1_b')(inner)
        reversed_gru_1b = Lambda(
            lambda inputTensor: K.reverse(inputTensor, axes=1))(gru_1b)

        gru1_merged = add([gru_1, reversed_gru_1b])  # (None, 32, 256)
        gru1_merged = BatchNormalization()(gru1_merged)

        gru_2 = GRU(256,
                    return_sequences=True,
                    kernel_initializer='he_normal',
                    name='gru2')(gru1_merged)
        gru_2b = GRU(256,
                     return_sequences=True,
                     go_backwards=True,
                     kernel_initializer='he_normal',
                     name='gru2_b')(gru1_merged)
        reversed_gru_2b = Lambda(
            lambda inputTensor: K.reverse(inputTensor, axes=1))(gru_2b)

        gru2_merged = concatenate([gru_2, reversed_gru_2b])  # (None, 32, 512)
        gru2_merged = BatchNormalization()(gru2_merged)

        # transforms RNN output to character activations:
        inner = Dense(num_classes,
                      kernel_initializer='he_normal',
                      name='dense2')(gru2_merged)  # (None, 32, 80)
        y_pred = Activation('softmax', name='softmax')(inner)

        model = Model(inputs=[inputs], outputs=y_pred)

        data = np.random.rand(1, 128, 64, 1).astype(np.float32)
        expected = model.predict(data)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected,
                             self.model_files))