Example #1
 def save_onnx(self, save_folder, save_name='base_model.onnx'):
     os.environ["TF_KERAS"] = '1'  # tell keras2onnx the model comes from tf.keras
     # Importing efficientnet.tfkeras registers the EfficientNet custom objects
     # (e.g. the swish activation) so the converter can resolve them.
     import efficientnet.tfkeras as efn
     import keras2onnx
     onnx_model = keras2onnx.convert_keras(self.base_model,
                                           self.base_model.name)
     keras2onnx.save_model(onnx_model, os.path.join(save_folder, save_name))
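The exported file can be sanity-checked with ONNX Runtime. A minimal sketch, assuming the model was saved as base_model.onnx and expects a single float32 NHWC image batch (the input shape below is only an illustration):

import numpy as np
import onnxruntime as ort

# Open the exported graph and push one dummy batch through it.
sess = ort.InferenceSession("base_model.onnx")
input_name = sess.get_inputs()[0].name              # name of the first graph input
dummy = np.random.rand(1, 224, 224, 3).astype(np.float32)  # assumed input shape
outputs = sess.run(None, {input_name: dummy})       # None -> return every graph output
print([o.shape for o in outputs])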
Example #2
def main():
    train(train_generator)
    # save keras model
    model_folder = 'model'
    if not os.path.exists(model_folder):
        os.mkdir(model_folder)
    json_file = os.path.join(model_folder, 'sample.json')
    yaml_file = os.path.join(model_folder, 'sample.yaml')
    h5_file = os.path.join(model_folder, 'sample.hdf5')

    with open(json_file, 'w') as f:
        f.write(model.to_json())
    with open(yaml_file, 'w') as f:
        f.write(model.to_yaml())
    model.save_weights(h5_file)

    onnx_folder = 'onnx'
    if not os.path.exists(onnx_folder):
        os.mkdir(onnx_folder)
    onnx_path = os.path.join(onnx_folder, 'sample.onnx')
    onnx_model = keras2onnx.convert_keras(model, 'sample', target_opset=qumico.SUPPORT_ONNX_OPSET)
    onnx.save_model(onnx_model, onnx_path)

    print(h5_file, "has been created.")
    print('ONNX file generated. Output path:', onnx_path)
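The artifacts written above can be restored later from the JSON architecture and the HDF5 weights. A minimal sketch, assuming the same standalone Keras API and the paths used in main():

from keras.models import model_from_json

# Rebuild the architecture from its JSON description, then restore the trained weights.
with open('model/sample.json') as f:
    restored = model_from_json(f.read())
restored.load_weights('model/sample.hdf5')
restored.summary()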
Example #3
    def test_NBeats(self):
        K.clear_session()
        num_samples, time_steps, input_dim, output_dim = 50000, 10, 1, 1

        # Definition of the model.
        keras_model = NBeatsNet(backcast_length=time_steps,
                                forecast_length=output_dim,
                                stack_types=(NBeatsNet.GENERIC_BLOCK,
                                             NBeatsNet.GENERIC_BLOCK),
                                nb_blocks_per_stack=2,
                                thetas_dim=(4, 4),
                                share_weights_in_stack=True,
                                hidden_layer_units=64)
        data = np.random.rand(num_samples, time_steps,
                              input_dim).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name,
                              onnx_model,
                              keras_model,
                              data,
                              expected,
                              self.model_files,
                              compare_perf=True))
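The helpers run_keras_and_ort and run_onnx_runtime used throughout these tests are not shown here. A minimal sketch of the comparison they boil down to, assuming a single-input, single-output model (the helper name and tolerances are illustrative, not the test suite's actual implementation):

import numpy as np
import onnxruntime as ort
import keras2onnx

def compare_keras_and_ort(keras_model, data, rtol=1e-3, atol=1e-5):
    """Convert a Keras model and check that ONNX Runtime reproduces its predictions."""
    expected = keras_model.predict(data)
    onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
    sess = ort.InferenceSession(onnx_model.SerializeToString())
    feed = {sess.get_inputs()[0].name: data}
    actual = sess.run(None, feed)[0]
    return np.allclose(expected, actual, rtol=rtol, atol=atol)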
Example #4
    def _test_keras_model(self,
                          model,
                          model_name='onnx_conversion',
                          rtol=1.e-3,
                          atol=1.e-5,
                          img_size=224):
        img_path = os.path.join(os.path.dirname(__file__), 'data',
                                'elephant.jpg')
        try:
            img = image.load_img(img_path, target_size=(img_size, img_size))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)

            preds = model.predict(x)
            onnx_model = keras2onnx.convert_keras(model, model.name)
            self.assertTrue(
                self.run_onnx_runtime(model_name,
                                      onnx_model,
                                      x,
                                      preds,
                                      rtol=rtol,
                                      atol=atol))
        except FileNotFoundError:
            self.fail('The image data does not exist.')
Example #5
def convert_tensorflow(nlp: Pipeline, opset: int, output: str):
    if not is_tf_available():
        raise Exception(
            "Cannot convert because TF is not installed. Please install tensorflow first.")

    print(
        "/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\"
    )

    try:
        import tensorflow as tf
        from keras2onnx import convert_keras, save_model, __version__ as k2ov

        print("TensorFlow: {}, keras2onnx: {}".format(tf.version.VERSION,
                                                      k2ov))

        # Build
        input_names, output_names, dynamic_axes, tokens = infer_shapes(
            nlp, "tf")

        # Forward
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model,
                                   nlp.model.name,
                                   target_opset=opset)
        save_model(onnx_model, output)

    except ImportError as e:
        raise Exception(
            "Cannot import {} required to convert TF model to ONNX. Please install {} first."
            .format(e.name, e.name))
Example #6
    def test_channel_last(self):
        N, C, H, W = 2, 3, 5, 5
        x = np.random.rand(N, H, W, C).astype(np.float32, copy=False)

        model = keras.models.Sequential()
        model.add(
            keras.layers.Conv2D(2,
                                kernel_size=(1, 2),
                                strides=(1, 1),
                                padding='valid',
                                input_shape=(H, W, C),
                                data_format='channels_last'))
        model.add(
            keras.layers.MaxPooling2D((2, 2),
                                      strides=(2, 2),
                                      data_format='channels_last'))

        model.compile(optimizer='sgd', loss='mse')
        onnx_model = keras2onnx.convert_keras(
            model, channel_first_inputs=[model.inputs[0].name])

        expected = model.predict(x)
        self.assertIsNotNone(expected)
        self.assertIsNotNone(onnx_model)

        x = np.transpose(x.astype(np.float32), [0, 3, 1, 2])
        self.assertTrue(
            self.run_onnx_runtime('channel_last_input', onnx_model, x,
                                  expected))
Example #7
    def test_recursive_and_shared_model(self):
        from keras.layers import Input, Dense, Add, Activation
        N, C, D = 2, 3, 3
        x = np.random.rand(N, C).astype(np.float32, copy=False)

        sub_input1 = Input(shape=(C, ))
        sub_mapped1 = Dense(D)(sub_input1)
        sub_output1 = Activation('sigmoid')(sub_mapped1)
        sub_model1 = keras.Model(inputs=sub_input1, outputs=sub_output1)

        sub_input2 = Input(shape=(C, ))
        sub_mapped2 = sub_model1(sub_input2)
        sub_output2 = Activation('tanh')(sub_mapped2)
        sub_model2 = keras.Model(inputs=sub_input2, outputs=sub_output2)

        input1 = Input(shape=(D, ))
        input2 = Input(shape=(D, ))
        mapped1_1 = Activation('tanh')(input1)
        mapped2_1 = Activation('sigmoid')(input2)
        mapped1_2 = sub_model1(mapped1_1)
        mapped1_3 = sub_model1(mapped1_2)
        mapped2_2 = sub_model2(mapped2_1)
        sub_sum = Add()([mapped1_3, mapped2_2])
        keras_model = keras.Model(inputs=[input1, input2], outputs=sub_sum)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)

        x = [x, 2 * x]
        expected = keras_model.predict(x)
        self.assertTrue(
            self.run_onnx_runtime('recursive_and_shared', onnx_model, x,
                                  expected))
Example #8
def convert_model(yolo, model_file_name, target_opset):
    yolo.load_model()
    onnxmodel = convert_keras(yolo.final_model,
                              target_opset=target_opset,
                              channel_first_inputs=['input_1'])
    onnx.save_model(onnxmodel, model_file_name)
    return onnxmodel
Example #9
 def save_onnx(self, model_dir: str, version: int = 1):
     """Save/Export Critic model in ONNX format"""
     critic_model_save_path = os.path.join(model_dir, "critic",
                                           str(version), "model.onnx")
     onnx_model = keras2onnx.convert_keras(self.model, self.model.name)
     keras2onnx.save_model(onnx_model, critic_model_save_path)
     print(f"Critic model saved in ONNX format at:{critic_model_save_path}")
Example #10
 def test_TFGPT2(self):
     if enable_full_transformer_test:
         from transformers import GPT2Config, TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
         model_list = [
             TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
         ]
     else:
         from transformers import GPT2Config, TFGPT2Model
         model_list = [TFGPT2Model]
     # pretrained_weights = 'gpt2'
     tokenizer_file = 'gpt2_gpt2.pickle'
     tokenizer = self._get_tokenzier(tokenizer_file)
     text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
     config = GPT2Config()
     for model_instance_ in model_list:
         keras.backend.clear_session()
         model = model_instance_(config)
         model._set_inputs(inputs)
         predictions_original = model(inputs)
         predictions = [predictions_original[0]] + list(
             v_.numpy() for v_ in predictions_original[1])
         onnx_model = keras2onnx.convert_keras(model, model.name)
         self.assertTrue(
             run_onnx_runtime(onnx_model.graph.name,
                              onnx_model,
                              inputs_onnx,
                              predictions,
                              self.model_files,
                              rtol=1.e-2,
                              atol=1.e-4))
Example #11
def kconversion():
    model = keras.models.load_model('model_keras')
    plot_model(model,
               to_file="model.png",
               show_shapes=True,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=True,
               dpi=96)
    # Note: the second argument of convert_keras is the graph name, not an output path.
    onnx_model = keras2onnx.convert_keras(model,
                                          'model0.onnx',
                                          debug_mode=True)
    output_model_path = "./model0.onnx"
    # and save the model in ONNX format
    keras2onnx.save_model(onnx_model, output_model_path)
    onnx_model = onnx.load("model0.onnx")
    s = MessageToJson(onnx_model)
    onnx_json = json.loads(s)
    # Convert JSON to String
    onnx_str = json.dumps(onnx_json)
    with open("model1.json", "w") as json_file:
        json_file.write(onnx_str)
    resp = make_response(onnx_str)
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
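The JSON dump produced above can also be turned back into an ONNX model with the same protobuf helpers. A minimal sketch, assuming model1.json was written by kconversion():

import onnx
from google.protobuf.json_format import Parse

# Rebuild an ONNX ModelProto from the JSON dump and verify it is still a valid graph.
with open("model1.json") as f:
    model_proto = Parse(f.read(), onnx.ModelProto())
onnx.checker.check_model(model_proto)
onnx.save_model(model_proto, "model1_roundtrip.onnx")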
Example #12
    def test_keras_resnet_batchnormalization(self):
        N, C, H, W = 2, 3, 120, 120
        import keras_resnet

        model = Sequential()
        model.add(
            ZeroPadding2D(padding=((3, 3), (3, 3)),
                          input_shape=(H, W, C),
                          data_format='channels_last'))
        model.add(
            Conv2D(64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   padding='valid',
                   dilation_rate=(1, 1),
                   use_bias=False,
                   data_format='channels_last'))
        model.add(keras_resnet.layers.BatchNormalization(freeze=True, axis=3))

        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(N, H, W, C).astype(np.float32)
        expected = model.predict(data)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected,
                             self.model_files))
Example #13
    def test_babi_rnn(self):
        # Two recurrent neural networks, one encoding a story and one encoding a question.
        # from https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py
        RNN = keras.layers.recurrent.LSTM
        EMBED_HIDDEN_SIZE = 50
        SENT_HIDDEN_SIZE = 100
        QUERY_HIDDEN_SIZE = 100
        BATCH_SIZE = 32
        story_maxlen = 15
        vocab_size = 27
        query_maxlen = 17

        sentence = Input(shape=(story_maxlen, ), dtype='int32')
        encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
        encoded_sentence = RNN(SENT_HIDDEN_SIZE)(encoded_sentence)

        question = Input(shape=(query_maxlen, ), dtype='int32')
        encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
        encoded_question = RNN(QUERY_HIDDEN_SIZE)(encoded_question)

        merged = concatenate([encoded_sentence, encoded_question])
        preds = Dense(vocab_size, activation='softmax')(merged)

        model = Model([sentence, question], preds)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.randint(5, 10,
                              size=(BATCH_SIZE, story_maxlen)).astype(np.int32)
        y = np.random.randint(5, 10,
                              size=(BATCH_SIZE, query_maxlen)).astype(np.int32)
        expected = model.predict([x, y])
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, {
                model.input_names[0]: x,
                model.input_names[1]: y
            }, expected, self.model_files))
Example #14
 def test_CNN_LSTM(self):
     K.clear_session()
     max_len = 20
     vocab_size = 50
     lstm_output_size = 70
     embedding_size = 100
     model = Sequential()
     model.add(
         Embedding(input_dim=vocab_size,
                   input_length=max_len,
                   output_dim=embedding_size))
     model.add(SpatialDropout1D(0.2))
     model.add(
         Conv1D(filters=256,
                kernel_size=5,
                padding='same',
                activation='relu'))
     model.add(MaxPooling1D(pool_size=4))
     model.add(LSTM(lstm_output_size))
     model.add(Dense(units=30, activation='softmax'))
     data = np.random.rand(2, max_len).astype(np.float32)
     expected = model.predict(data)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                           expected, self.model_files))
Example #15
    def test_MLSTM_FCN(self):
        K.clear_session()
        ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

        x = Masking()(ip)
        x = LSTM(8)(x)
        x = Dropout(0.8)(x)

        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        x = concatenate([x, y])
        out = Dense(NB_CLASS, activation='softmax')(x)
        keras_model = Model(ip, out)
        data = np.random.rand(2, MAX_NB_VARIABLES,
                              MAX_TIMESTEPS).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files))
Example #16
    def test_addition_rnn(self):
        # An implementation of sequence to sequence learning for performing addition
        # from https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py
        DIGITS = 3
        MAXLEN = DIGITS + 1 + DIGITS
        HIDDEN_SIZE = 128
        BATCH_SIZE = 128
        CHARS_LENGTH = 12

        for RNN in [
                keras.layers.LSTM, keras.layers.GRU, keras.layers.SimpleRNN
        ]:
            model = keras.models.Sequential()
            model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, CHARS_LENGTH)))
            model.add(keras.layers.RepeatVector(DIGITS + 1))
            model.add(RNN(HIDDEN_SIZE, return_sequences=True))
            model.add(
                keras.layers.TimeDistributed(
                    keras.layers.Dense(CHARS_LENGTH, activation='softmax')))
            onnx_model = keras2onnx.convert_keras(model, model.name)
            x = np.random.rand(BATCH_SIZE, MAXLEN,
                               CHARS_LENGTH).astype(np.float32)
            expected = model.predict(x)
            self.assertTrue(
                run_onnx_runtime(onnx_model.graph.name, onnx_model, x,
                                 expected, self.model_files))
Example #17
 def test_ecg_classification(self):
     model = Sequential()
     model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=[128, 128, 3], kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Conv2D(64, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
     model.add(Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
     model.add(Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
     model.add(Flatten())
     model.add(Dense(2048))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Dropout(0.5))
     model.add(Dense(7, activation='softmax'))
     onnx_model = keras2onnx.convert_keras(model, model.name)
     data = np.random.rand(2, 128, 128, 3).astype(np.float32)
     expected = model.predict(data)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))
Example #18
def test_mlf(runner):
    tf.keras.backend.clear_session()
    mlf = MLP()
    np_input = tf.random.normal((2, 20))
    expected = mlf.predict(np_input)
    oxml = keras2onnx.convert_keras(mlf)
    assert runner('lenet', oxml, np_input.numpy(), expected)
Example #19
    def test_TFXLNet(self):
        if enable_full_transformer_test:
            from transformers import XLNetConfig, TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
                TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, XLNetTokenizer
            model_list = [TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
                TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple]
        else:
            from transformers import XLNetConfig, TFXLNetModel, XLNetTokenizer
            model_list = [TFXLNetModel]

        # XLNetTokenizer needs SentencePiece, so the pickle file does not work here.
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        config = XLNetConfig(n_layer=2)
        # The model with input mask has MatrixDiagV3 which is not a registered function/op
        token = np.asarray(tokenizer.encode(self.text_str,
                                            add_special_tokens=True),
                           dtype=np.int32)
        inputs_onnx = {'input_1': np.expand_dims(token, axis=0)}
        inputs = tf.constant(token)[None, :]  # Batch size 1

        for model_instance_ in model_list:
            keras.backend.clear_session()
            model = model_instance_(config)
            predictions = model.predict(inputs)
            onnx_model = keras2onnx.convert_keras(model)
            self.assertTrue(
                run_onnx_runtime(onnx_model.graph.name,
                                 onnx_model,
                                 inputs_onnx,
                                 predictions,
                                 self.model_files,
                                 rtol=1.e-2,
                                 atol=1.e-4))
Example #20
    def test_lstm_fcn(self):
        MAX_SEQUENCE_LENGTH = 176
        NUM_CELLS = 8
        NB_CLASS = 37
        ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

        x = LSTM(NUM_CELLS)(ip)
        x = Dropout(0.8)(x)

        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        x = concatenate([x, y])

        out = Dense(NB_CLASS, activation='softmax')(x)

        model = Model(ip, out)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        batch_size = 2
        data = np.random.rand(batch_size, 1, MAX_SEQUENCE_LENGTH).astype(np.float32)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))
Example #21
 def export_onnx_model(self):
     onnx_model = keras2onnx.convert_keras(
         self.model,
         self.path2model + '/' + self.version + '_' + self.time_scale)
     onnx.save_model(
         onnx_model,
         self.path2onnx_model + '/' + self.version + '_' + self.time_scale)
Example #22
def main():
    """Converts a keras model into ONNX format."""
    # model = alexnet((224, 224, 3))
    model = build_model(
        NASNetMobile(input_shape=(224, 224, 3),
                     include_top=False,
                     weights='imagenet'))
    model.load_weights(KERAS_MODEL_PATH)

    # If we have not explicitly specified the image dimensions when creating
    # the model
    #
    # model = load_model(KERAS_MODEL_PATH)
    # model._layers[0].batch_input_shape = (batch_size, image_size, image_size,
    #                                       channels)
    #
    # In order for the input_shape to be saved correctly we have to
    # clone the model into a new one
    #
    # model = clone_model(model)
    #
    # When cloning we lose the weights, so load them again
    #
    # model.load_weights(KERAS_MODEL_PATH)

    onnx_model = keras2onnx.convert_keras(model, model.name)

    # target_opset=target_opset,
    # debug_mode=True

    keras2onnx.save_model(onnx_model, ONNX_MODEL_PATH)
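Before shipping the exported file it is cheap to validate it structurally. A minimal sketch, assuming ONNX_MODEL_PATH points at the file written above:

import onnx

# Load the exported model, run the ONNX checker, and print the declared graph inputs.
exported = onnx.load(ONNX_MODEL_PATH)
onnx.checker.check_model(exported)
for inp in exported.graph.input:
    dims = [d.dim_value or d.dim_param for d in inp.type.tensor_type.shape.dim]
    print(inp.name, dims)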
Example #23
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
    """
    Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)
    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where the generated ONNX model will be stored

    Notes: TensorFlow cannot export models bigger than 2 GB due to an internal TensorFlow constraint

    """
    if not is_tf_available():
        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")

    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")

    try:
        import tensorflow as tf

        from keras2onnx import __version__ as k2ov
        from keras2onnx import convert_keras, save_model

        print(f"Using framework TensorFlow: {tf.version.VERSION}, keras2onnx: {k2ov}")

        # Build
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")

        # Forward
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model, nlp.model.name, target_opset=opset)
        save_model(onnx_model, output.as_posix())

    except ImportError as e:
        raise Exception(f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first.")
Example #24
    def test_imdb_cnn_lstm(self):
        # A recurrent convolutional network on the IMDB sentiment classification task.
        # from https://github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py
        max_features = 20000
        maxlen = 100
        embedding_size = 128
        kernel_size = 5
        filters = 64
        pool_size = 4
        lstm_output_size = 70
        batch_size = 30

        model = Sequential()
        model.add(Embedding(max_features, embedding_size, input_length=maxlen))
        model.add(Dropout(0.25))
        model.add(
            Conv1D(filters,
                   kernel_size,
                   padding='valid',
                   activation='relu',
                   strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(LSTM(lstm_output_size))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        onnx_model = keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                             self.model_files))
Example #25
    def test_deep_speech_2(self):
        K.clear_session()
        input_dim = 20
        output_dim = 10
        rnn_units = 800
        # Define input tensor [batch, time, features]
        input_tensor = layers.Input([None, input_dim], name='X')

        # Add 4th dimension [batch, time, frequency, channel]
        x = layers.Lambda(keras.backend.expand_dims,
                          arguments=dict(axis=-1))(input_tensor)
        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 41],
                          strides=[2, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_1')(x)
        x = layers.BatchNormalization(name='conv_1_bn')(x)
        x = layers.ReLU(name='conv_1_relu')(x)

        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 21],
                          strides=[1, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_2')(x)
        x = layers.BatchNormalization(name='conv_2_bn')(x)
        x = layers.ReLU(name='conv_2_relu')(x)
        # We need to squeeze to 3D tensor. Thanks to the stride in frequency
        # domain, we reduce the number of features four times for each channel.
        x = layers.Reshape([-1, input_dim//4*32])(x)

        for i in [1, 2, 3, 4, 5]:
            recurrent = layers.GRU(units=rnn_units,
                                   activation='tanh',
                                   recurrent_activation='sigmoid',
                                   use_bias=True,
                                   return_sequences=True,
                                   reset_after=True,
                                   name='gru_'+str(i))
            x = layers.Bidirectional(recurrent,
                                     name='bidirectional'+str(i),
                                     merge_mode='concat')(x)
            x = layers.Dropout(rate=0.5)(x) if i < 5 else x  # Dropout only between recurrent layers

        # Return at each time step logits along characters. Then CTC
        # computation is more stable, in contrast to the softmax.
        x = layers.TimeDistributed(layers.Dense(units=rnn_units*2), name='dense_1')(x)
        x = layers.ReLU(name='dense_1_relu')(x)
        x = layers.Dropout(rate=0.5)(x)
        output_tensor = layers.TimeDistributed(layers.Dense(units=output_dim),
                                               name='dense_2')(x)

        model = keras.Model(input_tensor, output_tensor, name='DeepSpeech2')
        data = np.random.rand(2, 3, input_dim).astype(np.float32)
        expected = model.predict(data)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))
Example #26
    def run(self, max_episodes=500, max_timesteps=1000):
        """
            Run a Deep RL algorithm (inspired by https://keon.io/deep-q-learning/)
        """
        unity_file = self.download_unity_env()
        env = self.get_gym_env(unity_file)
        # ========================================== #

        model = build_model(self.params['observation_size'])

        print('Training...')
        for episode in range(max_episodes):
            print('Running episode {} of {}'.format(episode + 1, max_episodes),
                  end='\r')
            # observation == state
            observation = env.reset()
            observation = np.reshape(observation,
                                     (1, self.params['observation_size']))

            for step in range(max_timesteps):
                if np.random.rand() <= self.params['epsilon']:
                    # The agent acts randomly
                    action = [env.action_space.sample()]
                else:
                    action = model.predict(observation)

                observation, reward, done, info = env.step(action)

                action_val = action[0]
                targets = np.zeros(len(action_val))
                for i in range(len(action_val)):
                    targets[i] = reward + self.params['gamma'] * action_val[i]

                observation = np.reshape(observation,
                                         (1, self.params['observation_size']))
                model.fit(observation,
                          np.asarray([targets]),
                          epochs=1,
                          verbose=0)

                # if objective reached, no need to continue
                if done:
                    break

        # Note: the content of /opt/ml/model and /opt/ml/output is automatically uploaded
        #       to the bucket previously selected by the estimator at the end of the execution.
        # os.environ['SM_MODEL_DIR'] corresponds to /opt/ml/model
        model_path = os.path.join(os.environ['SM_MODEL_DIR'], 'tf_rldemo.onnx')

        # Note: convert the Keras model to ONNX so it can later be
        #       converted into Barracuda format; the Keras model cannot be
        #       used directly in Unity ml-agents.
        #       More info can be found here: https://github.com/onnx/keras-onnx
        onnx_model = keras2onnx.convert_keras(model, model.name)
        onnx.save_model(onnx_model, model_path)
        print('\nTraining finished!')

        # ========================================== #
        TfTrainer.close_env(env)
Example #27
def export_onnx_model_from_tf(model_name, opset_version,
                              use_external_data_format, model_type,
                              model_class, cache_dir, onnx_dir, input_names,
                              use_gpu, precision, optimize_onnx, validate_onnx,
                              use_raw_attention_mask, overwrite,
                              model_fusion_statistics):

    config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir)

    model = load_pretrained_model(model_name,
                                  config=config,
                                  cache_dir=cache_dir,
                                  custom_model_class=model_class,
                                  if_tf_model=True)

    model._saved_model_inputs_spec = None

    tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
    max_input_size = tokenizer.max_model_input_sizes[
        model_name] if model_name in tokenizer.max_model_input_sizes else 1024

    example_inputs = tokenizer.encode_plus("This is a sample input",
                                           return_tensors="tf",
                                           max_length=max_input_size,
                                           pad_to_max_length=True,
                                           truncation=True)

    example_inputs = filter_inputs(example_inputs, input_names)

    example_outputs = model(example_inputs, training=False)

    # Flatten is needed for gpt2 and distilgpt2.
    example_outputs_flatten = flatten(example_outputs)
    example_outputs_flatten = update_flatten_list(example_outputs_flatten, [])

    onnx_model_path = get_onnx_file_path(onnx_dir, model_name,
                                         len(input_names), False, use_gpu,
                                         precision, False,
                                         use_external_data_format)

    if overwrite or not os.path.exists(onnx_model_path):
        logger.info("Exporting ONNX model to {}".format(onnx_model_path))
        import keras2onnx
        onnx_model = keras2onnx.convert_keras(model,
                                              model.name,
                                              target_opset=opset_version)
        keras2onnx.save_model(onnx_model, onnx_model_path)
    else:
        logger.info(f"Skip export since model existed: {onnx_model_path}")

    model_type = model_type + '_keras'

    onnx_model_file, is_valid_onnx_model, vocab_size = validate_and_optimize_onnx(
        model_name, use_external_data_format, model_type, onnx_dir,
        input_names, use_gpu, precision, optimize_onnx, validate_onnx,
        use_raw_attention_mask, overwrite, config, model_fusion_statistics,
        onnx_model_path, example_inputs, example_outputs_flatten)

    return onnx_model_file, is_valid_onnx_model, vocab_size, max_input_size
Example #28
 def test_GAN(self):
     keras_model = GAN().combined
     x = np.random.rand(5, 100).astype(np.float32)
     expected = keras_model.predict(x)
     onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
     self.assertTrue(
         run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                          self.model_files))
Example #29
 def test_Pix2Pix(self):
     keras_model = Pix2Pix().combined
     batch = 5
     x = np.random.rand(batch, 256, 256, 3).astype(np.float32)
     y = np.random.rand(batch, 256, 256, 3).astype(np.float32)
     expected = keras_model.predict([x, y])
     onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
      self.assertTrue(
          run_onnx_runtime(onnx_model.graph.name, onnx_model,
                           {keras_model.input_names[0]: x,
                            keras_model.input_names[1]: y},
                           expected, self.model_files))
Example #30
 def test_wavenet(self):
     K.clear_session()
     keras_model = get_basic_generative_model(128)
     data = np.random.rand(2, 128, 1).astype(np.float32)
     expected = keras_model.predict(data)
     onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))