def test_music_generation(self):
    """Convert three related music-generation models (full, time-axis and
    note-axis) to ONNX and check onnxruntime output against Keras.

    build_models() is assumed to return the composite model plus the two
    sub-models it is built from — TODO confirm against the model source.
    """
    K.clear_session()
    model, time_model, note_model = build_models()
    batch_size = 2
    # Random placeholder tensors; shapes come from module-level constants.
    data_notes = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES, NOTE_UNITS).astype(np.float32)
    data_beat = np.random.rand(batch_size, SEQ_LEN, NOTES_PER_BAR).astype(np.float32)
    data_style = np.random.rand(batch_size, SEQ_LEN, NUM_STYLES).astype(np.float32)
    data_chosen = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES, NOTE_UNITS).astype(np.float32)

    # Full model: notes + chosen notes + beat + style.
    expected = model.predict(
        [data_notes, data_chosen, data_beat, data_style])
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_keras_and_ort(
            onnx_model.graph.name, onnx_model, model, {
                model.input_names[0]: data_notes,
                model.input_names[1]: data_chosen,
                model.input_names[2]: data_beat,
                model.input_names[3]: data_style
            }, expected, self.model_files))

    # Time-axis sub-model: same inputs minus the chosen-note tensor.
    expected = time_model.predict([data_notes, data_beat, data_style])
    onnx_model = mock_keras2onnx.convert_keras(time_model, time_model.name)
    self.assertTrue(
        run_keras_and_ort(
            onnx_model.graph.name, onnx_model, time_model, {
                time_model.input_names[0]: data_notes,
                time_model.input_names[1]: data_beat,
                time_model.input_names[2]: data_style
            }, expected, self.model_files))

    # Note-axis sub-model consumes a single time step and a different
    # per-note feature width (TIME_AXIS_UNITS), so fresh inputs are built.
    data_notes = np.random.rand(batch_size, 1, NUM_NOTES, TIME_AXIS_UNITS).astype(np.float32)
    data_chosen = np.random.rand(batch_size, 1, NUM_NOTES, NOTE_UNITS).astype(np.float32)
    data_style = np.random.rand(batch_size, 1, NUM_STYLES).astype(np.float32)
    expected = note_model.predict([data_notes, data_chosen, data_style])
    onnx_model = mock_keras2onnx.convert_keras(note_model, note_model.name)
    self.assertTrue(
        run_keras_and_ort(
            onnx_model.graph.name, onnx_model, note_model, {
                note_model.input_names[0]: data_notes,
                note_model.input_names[1]: data_chosen,
                note_model.input_names[2]: data_style
            }, expected, self.model_files))
def test_ecg_classification(self):
    """VGG-style ECG classifier: three Conv-ELU-BN stages with max-pooling,
    then a dense head. Verifies ONNX conversion against Keras output.

    Refactor: the original repeated the Conv2D/ELU/BatchNormalization trio
    six times inline; the stages are now built by a small local helper.
    """
    model = Sequential()

    def _add_conv_unit(filters, first=False):
        # Conv -> ELU -> BatchNorm; only the very first conv declares the
        # input shape of the network.
        kwargs = {'kernel_initializer': 'glorot_uniform'}
        if first:
            kwargs['input_shape'] = [128, 128, 3]
        model.add(Conv2D(filters, (3, 3), strides=(1, 1), **kwargs))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())

    for stage, filters in enumerate((64, 128, 256)):
        _add_conv_unit(filters, first=(stage == 0))
        _add_conv_unit(filters)
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    # Classification head: 2048-unit dense, dropout, 7-way softmax.
    model.add(Flatten())
    model.add(Dense(2048))
    model.add(keras.layers.ELU())
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(7, activation='softmax'))

    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    data = np.random.rand(2, 128, 128, 3).astype(np.float32)
    expected = model.predict(data)
    self.assertTrue(
        run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                          data, expected, self.model_files))
def test_lstm_fcn(self):
    """LSTM-FCN classifier: an LSTM branch and a Conv1D/GAP branch
    concatenated before the softmax head.

    Fix: dropped the redundant ``.reshape(batch_size, 1, MAX_SEQUENCE_LENGTH)``
    — ``np.random.rand`` already produces exactly that shape.
    """
    MAX_SEQUENCE_LENGTH = 176
    NUM_CELLS = 8
    NB_CLASS = 37
    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

    # Recurrent branch.
    x = LSTM(NUM_CELLS)(ip)
    x = Dropout(0.8)(x)

    # Convolutional branch works on (time, channel) order, hence the Permute.
    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])
    out = Dense(NB_CLASS, activation='softmax')(x)
    model = Model(ip, out)

    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    batch_size = 2
    data = np.random.rand(batch_size, 1, MAX_SEQUENCE_LENGTH).astype(np.float32)
    expected = model.predict(data)
    self.assertTrue(
        run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                          data, expected, self.model_files))
def test_addition_rnn(self):
    """Sequence-to-sequence addition model, exercised with each of the three
    built-in recurrent layer types.

    Based on https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py
    """
    DIGITS = 3
    MAXLEN = DIGITS + 1 + DIGITS
    HIDDEN_SIZE = 128
    BATCH_SIZE = 128
    CHARS_LENGTH = 12
    rnn_classes = (keras.layers.LSTM, keras.layers.GRU, keras.layers.SimpleRNN)
    for rnn_cls in rnn_classes:
        model = keras.models.Sequential()
        model.add(rnn_cls(HIDDEN_SIZE, input_shape=(MAXLEN, CHARS_LENGTH)))
        # Repeat the encoder state once per output digit (plus carry).
        model.add(keras.layers.RepeatVector(DIGITS + 1))
        model.add(rnn_cls(HIDDEN_SIZE, return_sequences=True))
        per_step_classifier = keras.layers.Dense(CHARS_LENGTH, activation='softmax')
        model.add(keras.layers.TimeDistributed(per_step_classifier))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        batch = np.random.rand(BATCH_SIZE, MAXLEN, CHARS_LENGTH).astype(np.float32)
        expected = model.predict(batch)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, batch,
                             expected, self.model_files))
def test_babi_rnn(self):
    """Two recurrent encoders over a story and a question, merged for a
    softmax answer prediction.

    From https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py

    Fix: use the public ``keras.layers.LSTM`` alias instead of the private
    ``keras.layers.recurrent`` module path, which is removed in newer Keras
    releases; every sibling test already uses the public path.
    """
    RNN = keras.layers.LSTM
    EMBED_HIDDEN_SIZE = 50
    SENT_HIDDEN_SIZE = 100
    QUERY_HIDDEN_SIZE = 100
    BATCH_SIZE = 32
    story_maxlen = 15
    vocab_size = 27
    query_maxlen = 17

    # Story encoder.
    sentence = Input(shape=(story_maxlen, ), dtype='int32')
    encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
    encoded_sentence = RNN(SENT_HIDDEN_SIZE)(encoded_sentence)

    # Question encoder.
    question = Input(shape=(query_maxlen, ), dtype='int32')
    encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
    encoded_question = RNN(QUERY_HIDDEN_SIZE)(encoded_question)

    merged = concatenate([encoded_sentence, encoded_question])
    preds = Dense(vocab_size, activation='softmax')(merged)
    model = Model([sentence, question], preds)

    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    x = np.random.randint(5, 10, size=(BATCH_SIZE, story_maxlen)).astype(np.int32)
    y = np.random.randint(5, 10, size=(BATCH_SIZE, query_maxlen)).astype(np.int32)
    expected = model.predict([x, y])
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, {
            model.input_names[0]: x,
            model.input_names[1]: y
        }, expected, self.model_files))
def test_name_entity_recognition(self):
    """NER tagger combining word, casing and character-CNN embeddings,
    fed through a BiLSTM with a per-token softmax."""
    K.clear_session()
    # Word and casing inputs are integer id sequences of dynamic length.
    words_input = Input(shape=(None,), dtype='int32', name='words_input')
    words = Embedding(input_dim=10, output_dim=20, weights=None, trainable=False)(words_input)
    casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
    casing = Embedding(output_dim=20, input_dim=12, weights=None, trainable=False)(casing_input)
    # Character branch: 52 char ids per token -> embedding -> Conv1D ->
    # max-pool over the char axis -> flat per-token feature vector.
    character_input = Input(shape=(None, 52,), name='char_input')
    embed_char_out = TimeDistributed(
        Embedding(26, 20), name='char_embedding')(character_input)
    dropout = Dropout(0.5)(embed_char_out)
    conv1d_out = TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same', activation='tanh', strides=1))(
        dropout)
    maxpool_out = TimeDistributed(MaxPooling1D(52))(conv1d_out)
    char = TimeDistributed(Flatten())(maxpool_out)
    char = Dropout(0.5)(char)
    # Concatenate the three embeddings and tag each token with a 35-way softmax.
    output = concatenate([words, casing, char])
    output = Bidirectional(LSTM(200, return_sequences=True, dropout=0.50,
                                recurrent_dropout=0.25))(output)
    output = TimeDistributed(Dense(35, activation='softmax'))(output)
    keras_model = Model(inputs=[words_input, casing_input, character_input], outputs=[output])
    batch_size = 100
    # Test data uses a fixed sequence length of 6 tokens.
    data1 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
    data2 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
    data3 = np.random.rand(batch_size, 6, 52).astype(np.float32)
    expected = keras_model.predict([data1, data2, data3])
    onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
    self.assertTrue(
        run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                          {keras_model.input_names[0]: data1,
                           keras_model.input_names[1]: data2,
                           keras_model.input_names[2]: data3},
                          expected, self.model_files))
def test_DPPG_actor(self):
    """DDPG actor network: dense trunk with Gaussian-noise regularisation
    and a tanh head scaled to the action range."""
    K.clear_session()
    env_dim = (2, 3)
    act_dim = 5
    act_range = 4
    state_input = Input(shape=env_dim)
    # Trunk: two dense layers, each followed by Gaussian noise.
    hidden = Dense(256, activation='relu')(state_input)
    hidden = GaussianNoise(1.0)(hidden)
    hidden = Flatten()(hidden)
    hidden = Dense(128, activation='relu')(hidden)
    hidden = GaussianNoise(1.0)(hidden)
    # Head: tanh in [-1, 1], rescaled to the environment's action range.
    action = Dense(act_dim, activation='tanh', kernel_initializer=RandomUniform())(hidden)
    action = Lambda(lambda a: a * act_range)(action)
    keras_model = Model(state_input, action)
    states = np.random.rand(1000, 2, 3).astype(np.float32)
    expected = keras_model.predict(states)
    onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
    self.assertTrue(
        run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                          states, expected, self.model_files))
def test_TFGPT2(self):
    """Convert HuggingFace TFGPT2 model(s) to ONNX and compare outputs.

    When the full transformer test suite is enabled, all three GPT-2 heads
    are covered; otherwise only the base TFGPT2Model.
    """
    if enable_full_transformer_test:
        from transformers import GPT2Config, TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
        model_list = [
            TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
        ]
    else:
        from transformers import GPT2Config, TFGPT2Model
        model_list = [TFGPT2Model]
    # pretrained_weights = 'gpt2'
    tokenizer_file = 'gpt2_gpt2.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = GPT2Config()
    for model_instance_ in model_list:
        keras.backend.clear_session()
        model = model_instance_(config)
        # _set_inputs builds the model so convert_keras can see its graph.
        model._set_inputs(inputs)
        predictions_original = model(inputs)
        # First output is a tensor; the second is a tuple of past key/value
        # tensors unpacked to numpy for comparison.
        # NOTE(review): assumes eager tensors with .numpy() — confirm TF2 mode.
        predictions = [predictions_original[0]] + list(
            v_.numpy() for v_ in predictions_original[1])
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model,
                             inputs_onnx, predictions, self.model_files,
                             rtol=1.e-2, atol=1.e-4))
def test_TFXLNet(self):
    """Convert HuggingFace TFXLNet model(s) to ONNX and compare outputs
    against onnxruntime on a single encoded sentence."""
    if enable_full_transformer_test:
        from transformers import XLNetConfig, TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
            TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, XLNetTokenizer
        model_list = [TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
                      TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple]
    else:
        from transformers import XLNetConfig, TFXLNetModel, XLNetTokenizer
        model_list = [TFXLNetModel]
    # XLNetTokenizer need SentencePiece, so the pickle file does not work here.
    tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
    # n_layer=2 keeps the test model small.
    config = XLNetConfig(n_layer=2)
    # The model with input mask has MatrixDiagV3 which is not a registered function/op
    token = np.asarray(tokenizer.encode(self.text_str, add_special_tokens=True), dtype=np.int32)
    inputs_onnx = {'input_1': np.expand_dims(token, axis=0)}
    inputs = tf.constant(token)[None, :]  # Batch size 1
    for model_instance_ in model_list:
        keras.backend.clear_session()
        model = model_instance_(config)
        predictions = model.predict(inputs)
        onnx_model = mock_keras2onnx.convert_keras(model)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model,
                             inputs_onnx, predictions, self.model_files,
                             rtol=1.e-2, atol=1.e-4))
def test_DeepFace(self):
    """DeepFace recognition network: conv layers plus locally-connected
    (unshared-weight) layers and a large softmax head.

    Fix: removed the leftover ``debug_mode=True`` flag from convert_keras —
    it is a debugging leftover and no sibling test passes it.
    """
    base_model = Sequential()
    base_model.add(
        Convolution2D(32, (11, 11), activation='relu', name='C1',
                      input_shape=(152, 152, 3)))
    base_model.add(
        MaxPooling2D(pool_size=3, strides=2, padding='same', name='M2'))
    base_model.add(Convolution2D(16, (9, 9), activation='relu', name='C3'))
    # Locally-connected layers: conv-like but with unshared weights.
    base_model.add(
        LocallyConnected2D(16, (9, 9), activation='relu', name='L4'))
    base_model.add(
        LocallyConnected2D(16, (7, 7), strides=2, activation='relu', name='L5'))
    base_model.add(
        LocallyConnected2D(16, (5, 5), activation='relu', name='L6'))
    base_model.add(Flatten(name='F0'))
    base_model.add(Dense(4096, activation='relu', name='F7'))
    base_model.add(Dropout(rate=0.5, name='D0'))
    base_model.add(Dense(8631, activation='softmax', name='F8'))
    data = np.random.rand(1, 152, 152, 3).astype(np.float32)
    expected = base_model.predict(data)
    onnx_model = mock_keras2onnx.convert_keras(base_model, base_model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, data,
                         expected, self.model_files))
def test_imdb_cnn_lstm(self):
    """Recurrent-convolutional network for IMDB sentiment classification.

    From https://github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py

    Fix: feed integer-valued token indices instead of uniform floats in
    [0, 1) — the old data truncated to embedding index 0 for every token,
    so only a single embedding row was ever exercised. The float32 dtype
    is kept so the converted graph's input signature is unchanged.
    """
    max_features = 20000
    maxlen = 100
    embedding_size = 128
    kernel_size = 5
    filters = 64
    pool_size = 4
    lstm_output_size = 70
    batch_size = 30

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout(0.25))
    model.add(
        Conv1D(filters, kernel_size, padding='valid', activation='relu',
               strides=1))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    # Integer indices in [0, max_features), stored as float32.
    x = np.random.randint(0, max_features, size=(batch_size, maxlen)).astype(np.float32)
    expected = model.predict(x)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected,
                         self.model_files))
def test_Deeplab_v3(self):
    """DeepLab v3+ segmentation model: Keras vs onnxruntime parity."""
    K.clear_session()
    segmenter = Deeplabv3(weights=None)
    images = np.random.rand(2, 512, 512, 3).astype(np.float32)
    reference = segmenter.predict(images)
    converted = mock_keras2onnx.convert_keras(segmenter, segmenter.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, segmenter,
                          images, reference, self.model_files))
def test_BIGAN(self):
    """BIGAN generator: takes a latent vector plus an image; checks ONNX parity."""
    generator = BIGAN().bigan_generator
    n = 5
    latent = np.random.rand(n, 100).astype(np.float32)
    image = np.random.rand(n, 28, 28, 1).astype(np.float32)
    reference = generator.predict([latent, image])
    converted = mock_keras2onnx.convert_keras(generator, generator.name)
    feeds = {generator.input_names[0]: latent,
             generator.input_names[1]: image}
    self.assertTrue(
        run_onnx_runtime(converted.graph.name, converted, feeds,
                         reference, self.model_files))
def test_wide_residual_network(self):
    """Wide ResNet on CIFAR-sized (32x32x3) inputs."""
    K.clear_session()
    wrn = create_wide_residual_network(input_dim=(32, 32, 3))
    images = np.random.rand(200, 32, 32, 3).astype(np.float32)
    reference = wrn.predict(images)
    converted = mock_keras2onnx.convert_keras(wrn, wrn.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, wrn,
                          images, reference, self.model_files))
def test_inception_v4(self):
    """Inception-v4 on 299x299 inputs."""
    K.clear_session()
    net = create_inception_v4()
    images = np.random.rand(2, 299, 299, 3).astype(np.float32)
    reference = net.predict(images)
    converted = mock_keras2onnx.convert_keras(net, net.name)
    self.assertTrue(
        run_onnx_runtime(converted.graph.name, converted, images,
                         reference, self.model_files))
def test_PlainNet(self):
    """PlainNet image classifier with 100 output classes."""
    K.clear_session()
    classifier = PlainNet(100)
    images = np.random.rand(200, 32, 32, 3).astype(np.float32)
    reference = classifier.predict(images)
    converted = mock_keras2onnx.convert_keras(classifier, classifier.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, classifier,
                          images, reference, self.model_files))
def test_InfoGAN(self):
    """InfoGAN combined model on a 72-dim latent/code vector."""
    gan = INFOGAN().combined
    latent = np.random.rand(5, 72).astype(np.float32)
    reference = gan.predict(latent)
    converted = mock_keras2onnx.convert_keras(gan, gan.name)
    self.assertTrue(
        run_onnx_runtime(converted.graph.name, converted, latent,
                         reference, self.model_files))
def test_AdversarialAutoencoder(self):
    """Adversarial autoencoder combined model on 28x28x1 images."""
    aae = AdversarialAutoencoder().adversarial_autoencoder
    images = np.random.rand(5, 28, 28, 1).astype(np.float32)
    reference = aae.predict(images)
    converted = mock_keras2onnx.convert_keras(aae, aae.name)
    self.assertTrue(
        run_onnx_runtime(converted.graph.name, converted, images,
                         reference, self.model_files))
def test_chatbot(self):
    """Seq2seq chatbot: BiLSTM encoder, LSTM decoder initialised from the
    encoder states, plus a soft attention over the encoder outputs."""
    K.clear_session()
    vocabulary_size = 1085
    # Heuristic sizing: embedding dim ~ fourth root of the vocabulary size.
    embedding_dim = int(pow(vocabulary_size, 1.0 / 4))
    latent_dim = embedding_dim * 40

    # --- Encoder ---
    encoder_inputs = Input(shape=(None, ), name='encoder_input')
    encoder_embedding = Embedding(vocabulary_size, embedding_dim, mask_zero=True,
                                  name='encoder_Embedding')(encoder_inputs)
    encoder = Bidirectional(LSTM(latent_dim, return_sequences=True,
                                 return_state=True, dropout=0.5),
                            name='encoder_BiLSTM')
    encoder_outputs, fw_state_h, fw_state_c, bw_state_h, bw_state_c = encoder(
        encoder_embedding)
    # Concatenate forward/backward states so they match the (2*latent_dim) decoder.
    state_h = Concatenate(axis=-1, name='encoder_state_h')([fw_state_h, bw_state_h])
    state_c = Concatenate(axis=-1, name='encoder_state_c')([fw_state_c, bw_state_c])
    encoder_states = [state_h, state_c]

    # --- Decoder ---
    decoder_inputs = Input(shape=(None, ), name='decoder_input')
    decoder_embedding = Embedding(vocabulary_size, embedding_dim, mask_zero=True,
                                  name='decoder_embedding')(decoder_inputs)
    decoder_lstm = LSTM(latent_dim * 2, return_sequences=True, return_state=True,
                        name='decoder_LSTM', dropout=0.5)
    decoder_outputs, _, _ = decoder_lstm(decoder_embedding,
                                         initial_state=encoder_states)

    # --- Attention over encoder outputs, applied multiplicatively ---
    attention = Dense(1, activation='tanh')(encoder_outputs)
    attention = Flatten()(attention)
    attention = Activation('softmax')(attention)
    attention = RepeatVector(latent_dim * 2)(attention)
    attention = Permute([2, 1])(attention)
    sent_dense = Multiply()([decoder_outputs, attention])

    decoder_dense = Dense(vocabulary_size, activation='softmax', name='dense_layer')
    decoder_outputs = decoder_dense(sent_dense)
    keras_model = Model([encoder_inputs, decoder_inputs], decoder_outputs)

    data1 = np.random.rand(2, 12).astype(np.float32)
    data2 = np.random.rand(2, 12).astype(np.float32)
    expected = keras_model.predict([data1, data2])
    onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
    self.assertTrue(
        run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                          [data1, data2], expected, self.model_files))
def run_image(model, model_files, img_path, model_name='onnx_conversion', rtol=1.e-3, atol=1.e-5,
              color_mode="rgb", target_size=224, compare_perf=False):
    """Load an image, run the Keras model and its ONNX conversion, and compare.

    Args:
        model: the Keras model to convert and run.
        model_files: list to which the saved ONNX file paths are appended.
        img_path: path to the input image.
        model_name: name passed to the runtime comparison helper.
        rtol, atol: numeric tolerances for output comparison.
        color_mode: "rgb" (with imagenet preprocessing) or e.g. "grayscale".
        target_size: int or (h, w) tuple the image is resized to.
        compare_perf: when True, also print average prediction time.

    Returns:
        (result, msg): result is the comparison outcome (False if the image
        is missing); msg is '' or a note that Keras prediction failed.

    Fixes: use time.perf_counter() (monotonic) rather than time.time()
    (wall clock) for interval timing, and '_' for the unused loop index.
    """
    if is_tf2:
        preprocess_input = keras.applications.imagenet_utils.preprocess_input
    else:
        preprocess_input = keras.applications.resnet50.preprocess_input
    image = keras.preprocessing.image
    try:
        if not isinstance(target_size, tuple):
            target_size = (target_size, target_size)
        if is_keras_older_than("2.2.3"):
            # color_mode is not supported in old keras version
            img = image.load_img(img_path, target_size=target_size)
        else:
            img = image.load_img(img_path, color_mode=color_mode, target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        if color_mode == "rgb":
            x = preprocess_input(x)
    except FileNotFoundError:
        return False, 'The image data does not exist.'

    msg = ''
    preds = None
    try:
        preds = model.predict(x)
        if compare_perf:
            count = 10
            time_start = time.perf_counter()
            for _ in range(count):
                model.predict(x)
            time_end = time.perf_counter()
            print('avg keras time =' + str((time_end - time_start) / count))
    except RuntimeError:
        msg = 'keras prediction throws an exception for model ' + model.name + ', skip comparison.'

    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    res = run_onnx_runtime(model_name, onnx_model, x, preds, model_files,
                           rtol=rtol, atol=atol, compare_perf=compare_perf)
    return res, msg
def test_PRN_Separate(self):
    """Pose Residual Network (separate variant) on heatmap inputs."""
    K.clear_session()
    prn = PRN_Seperate(28, 18, 15)
    heatmaps = np.random.rand(2, 28, 18, 17).astype(np.float32)
    reference = prn.predict(heatmaps)
    converted = mock_keras2onnx.convert_keras(prn, prn.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, prn,
                          heatmaps, reference, self.model_files))
def test_wavenet(self):
    """WaveNet-style generative model on 1-channel length-128 sequences."""
    K.clear_session()
    wavenet = get_basic_generative_model(128)
    waveforms = np.random.rand(2, 128, 1).astype(np.float32)
    reference = wavenet.predict(waveforms)
    converted = mock_keras2onnx.convert_keras(wavenet, wavenet.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, wavenet,
                          waveforms, reference, self.model_files))
def test_DPN92(self):
    """Dual Path Network (DPN-92) on 224x224 inputs."""
    K.clear_session()
    dpn = DPN92(input_shape=(224, 224, 3))
    images = np.random.rand(2, 224, 224, 3).astype(np.float32)
    reference = dpn.predict(images)
    converted = mock_keras2onnx.convert_keras(dpn, dpn.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, dpn,
                          images, reference, self.model_files))
def test_SE_InceptionResNetV2(self):
    """Squeeze-and-Excitation Inception-ResNet-v2 on 128x128 inputs."""
    K.clear_session()
    se_net = SEInceptionResNetV2()
    images = np.random.rand(2, 128, 128, 3).astype(np.float32)
    reference = se_net.predict(images)
    converted = mock_keras2onnx.convert_keras(se_net, se_net.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, se_net,
                          images, reference, self.model_files))
def test_DistilledResNetSR(self):
    """Distilled ResNet super-resolution model (scale factor 2)."""
    K.clear_session()
    builder = DistilledResNetSR(2.0)
    sr_model = builder.create_model()
    images = np.random.rand(2, 32, 32, 3).astype(np.float32)
    reference = sr_model.predict(images)
    converted = mock_keras2onnx.convert_keras(sr_model, sr_model.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, sr_model,
                          images, reference, self.model_files))
def test_DeepSpeaker(self):
    """Deep Speaker identification model with a 10-speaker softmax head."""
    K.clear_session()
    speaker_net = DeepSpeakerModel(batch_input_shape=(None, 32, 64, 4),
                                   include_softmax=True,
                                   num_speakers_softmax=10).keras_model()
    utterances = np.random.rand(2, 32, 64, 4).astype(np.float32)
    reference = speaker_net.predict(utterances)
    converted = mock_keras2onnx.convert_keras(speaker_net, speaker_net.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, speaker_net,
                          utterances, reference, self.model_files))
def test_mask_rcnn(self):
    """Convert the module-level Mask R-CNN Keras model to ONNX and compare
    selected outputs against onnxruntime on a real street image.

    Only outputs 0 and 3 are compared; the other heads are skipped —
    presumably known mismatches or non-deterministic ordering, TODO confirm.
    """
    # CropAndResize has no built-in handler; register the custom converter.
    set_converter('CropAndResize', convert_tf_crop_and_resize)
    onnx_model = mock_keras2onnx.convert_keras(model.keras_model)
    import skimage
    img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
    image = skimage.io.imread(img_path)
    images = [image]
    case_name = 'mask_rcnn'
    if not os.path.exists(tmp_path):
        os.mkdir(tmp_path)
    temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
    onnx.save_model(onnx_model, temp_model_file)
    try:
        import onnxruntime
        sess = onnxruntime.InferenceSession(temp_model_file)
    except ImportError:
        # onnxruntime not installed: nothing to compare, treat as success.
        return True
    # preprocessing
    molded_images, image_metas, windows = model.mold_inputs(images)
    anchors = model.get_anchors(molded_images[0].shape)
    # Duplicate the anchors across the batch dimension.
    anchors = np.broadcast_to(anchors, (model.config.BATCH_SIZE, ) + anchors.shape)
    expected = model.keras_model.predict([
        molded_images.astype(np.float32),
        image_metas.astype(np.float32), anchors
    ])
    actual = \
        sess.run(None, {"input_image": molded_images.astype(np.float32),
                        "input_anchors": anchors,
                        "input_image_meta": image_metas.astype(np.float32)})
    rtol = 1.e-3
    atol = 1.e-6
    compare_idx = [0, 3]
    res = all(
        np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol)
        for n_ in compare_idx)
    if res and temp_model_file not in self.model_files:
        # still keep the failed case files for the diagnosis.
        self.model_files.append(temp_model_file)
    if not res:
        # Print element-wise mismatches to aid debugging before failing.
        for n_ in compare_idx:
            expected_list = expected[n_].flatten()
            actual_list = actual[n_].flatten()
            print_mismatches(case_name, n_, expected_list, actual_list,
                             atol, rtol)
    self.assertTrue(res)
def test_CRAFT(self):
    """CRAFT text detector (VGG16-UNet) with a fixed 512x512 input.

    A fixed spatial size is required here; the fully dynamic
    Input(shape=(None, None, 3)) cannot be used for the conversion.
    """
    image_tensor = Input(shape=(512, 512, 3))
    region, affinity = VGG16_UNet(input_tensor=image_tensor, weights=None)
    detector = Model(image_tensor, [region, affinity], name='vgg16_unet')
    sample = np.random.rand(1, 512, 512, 3).astype(np.float32)
    reference = detector.predict(sample)
    converted = mock_keras2onnx.convert_keras(detector, detector.name)
    self.assertTrue(
        run_onnx_runtime(converted.graph.name, converted, sample,
                         reference, self.model_files))
def test_Pix2Pix(self):
    """Pix2Pix combined GAN with two 256x256x3 image inputs."""
    gan = Pix2Pix().combined
    n = 5
    img_a = np.random.rand(n, 256, 256, 3).astype(np.float32)
    img_b = np.random.rand(n, 256, 256, 3).astype(np.float32)
    reference = gan.predict([img_a, img_b])
    converted = mock_keras2onnx.convert_keras(gan, gan.name)
    feeds = {gan.input_names[0]: img_a,
             gan.input_names[1]: img_b}
    self.assertTrue(
        run_onnx_runtime(converted.graph.name, converted, feeds,
                         reference, self.model_files))
def test_NBeats(self):
    """N-BEATS forecasting network with two generic stacks."""
    K.clear_session()
    num_samples, time_steps, input_dim, output_dim = 50000, 10, 1, 1
    # Definition of the model.
    forecaster = NBeatsNet(backcast_length=time_steps,
                           forecast_length=output_dim,
                           stack_types=(NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK),
                           nb_blocks_per_stack=2,
                           thetas_dim=(4, 4),
                           share_weights_in_stack=True,
                           hidden_layer_units=64)
    series = np.random.rand(num_samples, time_steps, input_dim).astype(np.float32)
    reference = forecaster.predict(series)
    converted = mock_keras2onnx.convert_keras(forecaster, forecaster.name)
    self.assertTrue(
        run_keras_and_ort(converted.graph.name, converted, forecaster,
                          series, reference, self.model_files))