Exemple #1
0
    def test_music_generation(self):
        """Convert the music-generation models to ONNX and check runtime parity.

        build_models() (project helper) returns the combined model plus the
        time-axis and note-axis sub-models; each is fed random inputs and the
        ONNX Runtime outputs are compared against the Keras predictions.
        SEQ_LEN, NUM_NOTES, NOTE_UNITS, NOTES_PER_BAR, NUM_STYLES and
        TIME_AXIS_UNITS are presumably module-level constants -- confirm
        against the test module's imports.
        """
        K.clear_session()
        model, time_model, note_model = build_models()

        batch_size = 2
        # Random float32 tensors shaped to match each model input.
        data_notes = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES,
                                    NOTE_UNITS).astype(np.float32)
        data_beat = np.random.rand(batch_size, SEQ_LEN,
                                   NOTES_PER_BAR).astype(np.float32)
        data_style = np.random.rand(batch_size, SEQ_LEN,
                                    NUM_STYLES).astype(np.float32)
        data_chosen = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES,
                                     NOTE_UNITS).astype(np.float32)

        # Combined model: four inputs, fed to ORT as a dict keyed by input name.
        expected = model.predict(
            [data_notes, data_chosen, data_beat, data_style])
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(
                onnx_model.graph.name, onnx_model, model, {
                    model.input_names[0]: data_notes,
                    model.input_names[1]: data_chosen,
                    model.input_names[2]: data_beat,
                    model.input_names[3]: data_style
                }, expected, self.model_files))

        # Time-axis sub-model: three inputs (notes, beat, style).
        expected = time_model.predict([data_notes, data_beat, data_style])
        onnx_model = keras2onnx.convert_keras(time_model, time_model.name)
        self.assertTrue(
            run_keras_and_ort(
                onnx_model.graph.name, onnx_model, time_model, {
                    time_model.input_names[0]: data_notes,
                    time_model.input_names[1]: data_beat,
                    time_model.input_names[2]: data_style
                }, expected, self.model_files))

        # Note-axis sub-model consumes single-step (length-1) sequences with
        # TIME_AXIS_UNITS note features -- hence fresh tensors here.
        data_notes = np.random.rand(batch_size, 1, NUM_NOTES,
                                    TIME_AXIS_UNITS).astype(np.float32)
        data_chosen = np.random.rand(batch_size, 1, NUM_NOTES,
                                     NOTE_UNITS).astype(np.float32)
        data_style = np.random.rand(batch_size, 1,
                                    NUM_STYLES).astype(np.float32)
        expected = note_model.predict([data_notes, data_chosen, data_style])
        onnx_model = keras2onnx.convert_keras(note_model, note_model.name)
        self.assertTrue(
            run_keras_and_ort(
                onnx_model.graph.name, onnx_model, note_model, {
                    note_model.input_names[0]: data_notes,
                    note_model.input_names[1]: data_chosen,
                    note_model.input_names[2]: data_style
                }, expected, self.model_files))
Exemple #2
0
 def test_CNN_LSTM(self):
     """Embedding -> CNN -> LSTM text classifier; verify ONNX Runtime parity."""
     K.clear_session()
     max_len = 20
     vocab_size = 50
     lstm_output_size = 70
     embedding_size = 100
     # Identical topology to the add()-based construction, built from a list.
     model = Sequential([
         Embedding(input_dim=vocab_size,
                   input_length=max_len,
                   output_dim=embedding_size),
         SpatialDropout1D(0.2),
         Conv1D(filters=256,
                kernel_size=5,
                padding='same',
                activation='relu'),
         MaxPooling1D(pool_size=4),
         LSTM(lstm_output_size),
         Dense(units=30, activation='softmax'),
     ])
     sample = np.random.rand(2, max_len).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     verdict = run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                                 sample, reference, self.model_files)
     self.assertTrue(verdict)
Exemple #3
0
 def test_ecg_classification(self):
     """VGG-style ECG image classifier (Conv/ELU/BatchNorm stacks): ONNX parity.

     NOTE(review): unlike most tests in this file, this one does not call
     K.clear_session() first -- confirm that is intentional.
     """
     model = Sequential()
     # Block 1: two 64-filter 3x3 convs, each followed by ELU + BatchNorm.
     model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=[128, 128, 3], kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Conv2D(64, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
     # Block 2: two 128-filter convs.
     model.add(Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
     # Block 3: two 256-filter convs.
     model.add(Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
     # Dense head: 2048-unit layer with dropout, then 7-way softmax.
     model.add(Flatten())
     model.add(Dense(2048))
     model.add(keras.layers.ELU())
     model.add(BatchNormalization())
     model.add(Dropout(0.5))
     model.add(Dense(7, activation='softmax'))
     onnx_model = keras2onnx.convert_keras(model, model.name)
     data = np.random.rand(2, 128, 128, 3).astype(np.float32)
     expected = model.predict(data)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))
Exemple #4
0
    def test_lstm_fcn(self):
        """LSTM-FCN classifier (recurrent + convolutional branches): ONNX parity.

        Fixes vs. the original: the trailing .reshape() to the identical shape
        was a no-op and has been removed, and K.clear_session() is now called
        first for consistency with the other tests in this file.
        """
        K.clear_session()
        MAX_SEQUENCE_LENGTH = 176
        NUM_CELLS = 8
        NB_CLASS = 37
        ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

        # Recurrent branch.
        x = LSTM(NUM_CELLS)(ip)
        x = Dropout(0.8)(x)

        # Convolutional branch works on the permuted (time, channel) layout.
        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        # Merge both branches and classify.
        x = concatenate([x, y])

        out = Dense(NB_CLASS, activation='softmax')(x)

        model = Model(ip, out)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        batch_size = 2
        # np.random.rand already yields shape (batch, 1, MAX_SEQUENCE_LENGTH).
        data = np.random.rand(batch_size, 1,
                              MAX_SEQUENCE_LENGTH).astype(np.float32)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))
 def test_DPPG_actor(self):
     """DDPG-style actor network (dense + Gaussian-noise layers, tanh output
     scaled to the action range): verify ONNX Runtime parity."""
     K.clear_session()
     env_dim = (2, 3)
     act_dim = 5
     act_range = 4
     state = Input(shape=env_dim)
     hidden = Dense(256, activation='relu')(state)
     hidden = GaussianNoise(1.0)(hidden)
     hidden = Flatten()(hidden)
     hidden = Dense(128, activation='relu')(hidden)
     hidden = GaussianNoise(1.0)(hidden)
     action = Dense(act_dim,
                    activation='tanh',
                    kernel_initializer=RandomUniform())(hidden)
     # Scale the tanh output from [-1, 1] to the action range.
     action = Lambda(lambda i: i * act_range)(action)
     model = Model(state, action)
     sample = np.random.rand(1000, 2, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_name_entity_recognition(self):
     """NER tagger (word + casing embeddings, char CNN, BiLSTM): ONNX parity.

     NOTE(review): this test converts via mock_keras2onnx while most tests in
     this file call keras2onnx directly -- confirm the alias is intentional.
     """
     K.clear_session()
     # Word-token input with a frozen embedding.
     words_input = Input(shape=(None,), dtype='int32', name='words_input')
     words = Embedding(input_dim=10, output_dim=20,
                       weights=None, trainable=False)(words_input)
     # Casing-feature input with a frozen embedding.
     casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
     casing = Embedding(output_dim=20, input_dim=12,
                        weights=None, trainable=False)(casing_input)
     # Per-word character input (52 characters per word), embedded per step.
     character_input = Input(shape=(None, 52,), name='char_input')
     embed_char_out = TimeDistributed(
         Embedding(26, 20),
         name='char_embedding')(character_input)
     dropout = Dropout(0.5)(embed_char_out)
     # Character-level convolution + max-pool collapses each word's characters
     # into a fixed-size feature vector.
     conv1d_out = TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same', activation='tanh', strides=1))(
         dropout)
     maxpool_out = TimeDistributed(MaxPooling1D(52))(conv1d_out)
     char = TimeDistributed(Flatten())(maxpool_out)
     char = Dropout(0.5)(char)
     # Concatenate the three feature streams and tag with a BiLSTM + softmax.
     output = concatenate([words, casing, char])
     output = Bidirectional(LSTM(200, return_sequences=True, dropout=0.50, recurrent_dropout=0.25))(output)
     output = TimeDistributed(Dense(35, activation='softmax'))(output)
     keras_model = Model(inputs=[words_input, casing_input, character_input], outputs=[output])
     batch_size = 100
     # Integer ids for the two embedding inputs; floats for the char tensor.
     data1 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
     data2 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
     data3 = np.random.rand(batch_size, 6, 52).astype(np.float32)
     expected = keras_model.predict([data1, data2, data3])
     onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                           {keras_model.input_names[0]: data1,
                            keras_model.input_names[1]: data2,
                            keras_model.input_names[2]: data3}, expected, self.model_files))
    def test_NBeats(self):
        """N-Beats forecaster: ONNX Runtime parity plus a perf comparison."""
        K.clear_session()
        num_samples = 50_000
        time_steps = 10
        input_dim = 1
        output_dim = 1

        # Two generic stacks with two weight-sharing blocks each.
        model = NBeatsNet(backcast_length=time_steps,
                          forecast_length=output_dim,
                          stack_types=(NBeatsNet.GENERIC_BLOCK,
                                       NBeatsNet.GENERIC_BLOCK),
                          nb_blocks_per_stack=2,
                          thetas_dim=(4, 4),
                          share_weights_in_stack=True,
                          hidden_layer_units=64)
        sample = np.random.rand(num_samples, time_steps,
                                input_dim).astype(np.float32)
        reference = model.predict(sample)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                              sample, reference, self.model_files,
                              compare_perf=True))
Exemple #8
0
    def test_DDQN(self):
        """Dueling DQN head: ONNX parity with relaxed tolerances.

        The final dense layer emits action_dim + 1 values; a Lambda layer
        recombines them as Q = V + (A - mean(A)). The rtol/atol overrides on
        run_keras_and_ort presumably absorb numeric drift from that
        aggregation -- confirm against the runner's defaults.
        """
        K.clear_session()
        state_dim = (2, 3)
        action_dim = 5
        inp = Input(shape=(state_dim))  # NOTE(review): parens redundant; shape=state_dim

        # Determine whether we are dealing with an image input (Atari) or not
        if (len(state_dim) > 2):
            # Image path: conv stack. Dead for state_dim == (2, 3); kept as in
            # the original network-building code.
            inp = Input((state_dim[1:]))
            x = conv_block(inp, 32, (2, 2), 8)
            x = conv_block(x, 64, (2, 2), 4)
            x = conv_block(x, 64, (2, 2), 3)
            x = Flatten()(x)
            x = Dense(256, activation='relu')(x)
        else:
            # Vector path: flatten then two dense layers.
            x = Flatten()(inp)
            x = Dense(64, activation='relu')(x)
            x = Dense(64, activation='relu')(x)

        # Column 0 is the state value; columns 1.. are per-action advantages.
        x = Dense(action_dim + 1, activation='linear')(x)
        # Dueling aggregation: V + A - mean(A).
        x = Lambda(lambda i: K.expand_dims(i[:,0],-1) + i[:,1:] - K.mean(i[:,1:], keepdims=True),
                   output_shape=(action_dim,))(x)
        keras_model = Model(inp, x)
        data = np.random.rand(1000, 2, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected,
                              self.model_files, rtol=1e-2, atol=2e-2))
    def test_MLSTM_FCN(self):
        """MLSTM-FCN classifier (masked LSTM branch + SE-augmented conv branch).

        MAX_NB_VARIABLES, MAX_TIMESTEPS, NB_CLASS and squeeze_excite_block are
        presumably defined at module level -- confirm against the imports.
        """
        K.clear_session()
        ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

        # Recurrent branch with masking.
        x = Masking()(ip)
        x = LSTM(8)(x)
        x = Dropout(0.8)(x)

        # Convolutional branch on the permuted (time, variable) layout; the
        # first two conv blocks are followed by squeeze-and-excite gating.
        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)

        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)

        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)

        y = GlobalAveragePooling1D()(y)

        # Merge the branches and classify.
        x = concatenate([x, y])
        out = Dense(NB_CLASS, activation='softmax')(x)
        keras_model = Model(ip, out)
        data = np.random.rand(2, MAX_NB_VARIABLES, MAX_TIMESTEPS).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
Exemple #10
0
    def test_keras_resnet_batchnormalization(self):
        """keras-resnet frozen BatchNormalization layer: ONNX parity check.

        Fixes vs. the original: the trailing .reshape((N, H, W, C)) was a
        no-op (np.random.rand already returns that shape) and has been
        removed, and K.clear_session() is now called first for consistency
        with the other tests in this file.
        """
        K.clear_session()
        N, C, H, W = 2, 3, 120, 120
        import keras_resnet

        model = Sequential()
        model.add(
            ZeroPadding2D(padding=((3, 3), (3, 3)),
                          input_shape=(H, W, C),
                          data_format='channels_last'))
        model.add(
            Conv2D(64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   padding='valid',
                   dilation_rate=(1, 1),
                   use_bias=False,
                   data_format='channels_last'))
        # freeze=True keeps the normalization statistics fixed at inference.
        model.add(keras_resnet.layers.BatchNormalization(freeze=True, axis=3))

        onnx_model = keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(N, H, W, C).astype(np.float32)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))
    def test_deep_speech_2(self):
        """DeepSpeech2 acoustic model (2 convs + 5 BiGRUs + dense head): ONNX
        parity between Keras predictions and ONNX Runtime."""
        K.clear_session()
        input_dim = 20
        output_dim = 10
        rnn_units = 800
        # Define input tensor [batch, time, features]
        input_tensor = layers.Input([None, input_dim], name='X')

        # Add 4th dimension [batch, time, frequency, channel]
        x = layers.Lambda(keras.backend.expand_dims,
                          arguments=dict(axis=-1))(input_tensor)
        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 41],
                          strides=[2, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_1')(x)
        x = layers.BatchNormalization(name='conv_1_bn')(x)
        x = layers.ReLU(name='conv_1_relu')(x)

        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 21],
                          strides=[1, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_2')(x)
        x = layers.BatchNormalization(name='conv_2_bn')(x)
        x = layers.ReLU(name='conv_2_relu')(x)
        # We need to squeeze to 3D tensor. Thanks to the stride in frequency
        # domain, we reduce the number of features four times for each channel.
        x = layers.Reshape([-1, input_dim//4*32])(x)

        # Stack of five bidirectional GRU layers (concat-merged).
        for i in [1, 2, 3, 4, 5]:
            recurrent = layers.GRU(units=rnn_units,
                                   activation='tanh',
                                   recurrent_activation='sigmoid',
                                   use_bias=True,
                                   return_sequences=True,
                                   reset_after=True,
                                   name='gru_'+str(i))
            x = layers.Bidirectional(recurrent,
                                     name='bidirectional'+str(i),
                                     merge_mode='concat')(x)
            x = layers.Dropout(rate=0.5)(x) if i < 5 else x  # dropout only between recurrent layers, not after the last

        # Return at each time step logits along characters. Then CTC
        # computation is more stable, in contrast to the softmax.
        x = layers.TimeDistributed(layers.Dense(units=rnn_units*2), name='dense_1')(x)
        x = layers.ReLU(name='dense_1_relu')(x)
        x = layers.Dropout(rate=0.5)(x)
        output_tensor = layers.TimeDistributed(layers.Dense(units=output_dim),
                                               name='dense_2')(x)

        model = keras.Model(input_tensor, output_tensor, name='DeepSpeech2')
        data = np.random.rand(2, 3, input_dim).astype(np.float32)
        expected = model.predict(data)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))
Exemple #12
0
 def test_NASNetMobile(self):
     """NASNetMobile: convert to ONNX and compare runtime outputs."""
     K.clear_session()
     model = NASNetMobile()
     sample = np.random.rand(2, 224, 224, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_Deeplab_v3(self):
     """DeepLab v3 segmentation model (random weights): ONNX parity check."""
     K.clear_session()
     model = Deeplabv3(weights=None)
     sample = np.random.rand(2, 512, 512, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = mock_keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
Exemple #14
0
 def test_PRN_Separate(self):
     """Pose Refinement Network (separate variant): ONNX parity check."""
     K.clear_session()
     model = PRN_Seperate(28, 18, 15)
     sample = np.random.rand(2, 28, 18, 17).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
Exemple #15
0
 def test_SE_InceptionResNetV2(self):
     """Squeeze-excite InceptionResNetV2: ONNX parity check."""
     K.clear_session()
     model = SEInceptionResNetV2()
     sample = np.random.rand(2, 128, 128, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_DeepSpeaker(self):
     """Deep Speaker embedding model (softmax head, 10 speakers): ONNX parity."""
     K.clear_session()
     wrapper = DeepSpeakerModel(batch_input_shape=(None, 32, 64, 4),
                                include_softmax=True,
                                num_speakers_softmax=10)
     model = wrapper.keras_model()
     sample = np.random.rand(2, 32, 64, 4).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_wide_residual_network(self):
     """Wide ResNet on 32x32x3 inputs: ONNX parity check."""
     K.clear_session()
     model = create_wide_residual_network(input_dim=(32, 32, 3))
     sample = np.random.rand(200, 32, 32, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = mock_keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
Exemple #18
0
 def test_series_net(self):
     """Dilated-causal-CNN series model: ONNX parity plus perf comparison."""
     K.clear_session()
     model = DC_CNN_Model(20)
     sample = np.random.rand(2000, 20, 1).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files,
                           compare_perf=True))
Exemple #19
0
 def test_wavenet(self):
     """WaveNet-style generative model (128-step input): ONNX parity check."""
     K.clear_session()
     model = get_basic_generative_model(128)
     sample = np.random.rand(2, 128, 1).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_PlainNet(self):
     """PlainNet (100 classes) on 32x32x3 inputs: ONNX parity check."""
     K.clear_session()
     model = PlainNet(100)
     sample = np.random.rand(200, 32, 32, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = mock_keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_non_local_nets(self):
     """Non-local ResNet-18 and ResNet-50 variants: ONNX parity for each."""
     K.clear_session()
     candidates = [NonLocalResNet18((128, 160, 3), classes=10),
                   NonLocalResNet50((128, 160, 3), classes=10)]
     for model in candidates:
         sample = np.random.rand(2, 128, 160, 3).astype(np.float32)
         reference = model.predict(sample)
         onnx_model = keras2onnx.convert_keras(model, model.name)
         self.assertTrue(
             run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                               sample, reference, self.model_files))
Exemple #22
0
    def test_chatbot(self):
        """Seq2seq chatbot (BiLSTM encoder, LSTM decoder, soft attention):
        ONNX parity between Keras and ONNX Runtime.

        NOTE(review): converts via mock_keras2onnx while most tests here use
        keras2onnx directly -- confirm the alias is intentional.
        """
        K.clear_session()
        vocabulary_size = 1085
        embedding_dim = int(pow(vocabulary_size, 1.0 / 4))
        latent_dim = embedding_dim * 40
        # Encoder: masked embedding into a bidirectional LSTM that also
        # returns its forward/backward states.
        encoder_inputs = Input(shape=(None, ), name='encoder_input')
        encoder_embedding = Embedding(vocabulary_size,
                                      embedding_dim,
                                      mask_zero=True,
                                      name='encoder_Embedding')(encoder_inputs)
        encoder = Bidirectional(LSTM(latent_dim,
                                     return_sequences=True,
                                     return_state=True,
                                     dropout=0.5),
                                name='encoder_BiLSTM')
        encoder_outputs, fw_state_h, fw_state_c, bw_state_h, bw_state_c = encoder(
            encoder_embedding)
        # Concatenate forward/backward states to seed the (2x-wide) decoder.
        state_h = Concatenate(axis=-1,
                              name='encoder_state_h')([fw_state_h, bw_state_h])
        state_c = Concatenate(axis=-1,
                              name='encoder_state_c')([fw_state_c, bw_state_c])
        encoder_states = [state_h, state_c]

        # Decoder: masked embedding into an LSTM initialized with the
        # encoder states.
        decoder_inputs = Input(shape=(None, ), name='decoder_input')
        decoder_embedding = Embedding(vocabulary_size,
                                      embedding_dim,
                                      mask_zero=True,
                                      name='decoder_embedding')(decoder_inputs)
        decoder_lstm = LSTM(latent_dim * 2,
                            return_sequences=True,
                            return_state=True,
                            name='decoder_LSTM',
                            dropout=0.5)
        decoder_outputs, _, _ = decoder_lstm(decoder_embedding,
                                             initial_state=encoder_states)

        # Attention: softmax weights over encoder outputs, broadcast across
        # the decoder feature dimension and applied multiplicatively.
        attention = Dense(1, activation='tanh')(encoder_outputs)
        attention = Flatten()(attention)
        attention = Activation('softmax')(attention)
        attention = RepeatVector(latent_dim * 2)(attention)
        attention = Permute([2, 1])(attention)
        sent_dense = Multiply()([decoder_outputs, attention])

        # Project to vocabulary-sized softmax at each decoder step.
        decoder_dense = Dense(vocabulary_size,
                              activation='softmax',
                              name='dense_layer')
        decoder_outputs = decoder_dense(sent_dense)
        keras_model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        data1 = np.random.rand(2, 12).astype(np.float32)
        data2 = np.random.rand(2, 12).astype(np.float32)
        expected = keras_model.predict([data1, data2])
        onnx_model = mock_keras2onnx.convert_keras(keras_model,
                                                   keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              [data1, data2], expected, self.model_files))
 def test_DistilledResNetSR(self):
     """Distilled ResNet super-resolution (2x scale): ONNX parity check."""
     K.clear_session()
     model = DistilledResNetSR(2.0).create_model()
     sample = np.random.rand(2, 32, 32, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_DPN92(self):
     """Dual-Path Network 92 on 224x224x3 inputs: ONNX parity check."""
     K.clear_session()
     model = DPN92(input_shape=(224, 224, 3))
     sample = np.random.rand(2, 224, 224, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = mock_keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_GANImageSuperResolutionModel(self):
     """GAN super-resolution model (2x scale): ONNX parity check."""
     K.clear_session()
     model = GANImageSuperResolutionModel(2.0).create_model()
     sample = np.random.rand(2, 32, 32, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = mock_keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files))
 def test_DeepLabV3Plus(self):
     """DeepLab v3+ segmentation model: ONNX parity plus perf comparison."""
     K.clear_session()
     model = DeeplabV3_plus(input_height=224, input_width=224)
     sample = np.random.rand(1, 224, 224, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files,
                           compare_perf=True))
 def test_ENet(self):
     """ENet segmentation model (80 classes): ONNX parity plus perf check."""
     K.clear_session()
     model = ENet(80)
     sample = np.random.rand(1, 256, 256, 3).astype(np.float32)
     reference = model.predict(sample)
     onnx_model = keras2onnx.convert_keras(model, model.name)
     self.assertTrue(
         run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                           sample, reference, self.model_files,
                           compare_perf=True))
    def test_deep_speech(self):
        """DeepSpeech (v1-style) acoustic model: ONNX parity check.

        One time-context convolution, three clipped-ReLU dense layers and a
        sum-merged BiLSTM, finishing with per-step logits.

        NOTE(review): converts via mock_keras2onnx while most tests here use
        keras2onnx directly -- confirm the alias is intentional.
        """
        K.clear_session()
        input_dim = 20
        output_dim = 10
        context = 7
        units = 1024
        dropouts = (0.1, 0.1, 0)

        # Define input tensor [batch, time, features]
        input_tensor = layers.Input([None, input_dim], name='X')

        # Add 4th dimension [batch, time, frequency, channel]
        x = layers.Lambda(keras.backend.expand_dims,
                          arguments=dict(axis=-1))(input_tensor)
        # Fill zeros around time dimension
        x = layers.ZeroPadding2D(padding=(context, 0))(x)
        # Convolve signal in time dim
        receptive_field = (2 * context + 1, input_dim)
        x = layers.Conv2D(filters=units, kernel_size=receptive_field)(x)
        # Squeeze into 3rd dim array
        x = layers.Lambda(keras.backend.squeeze, arguments=dict(axis=2))(x)
        # Add non-linearity
        x = layers.ReLU(max_value=20)(x)
        # Use dropout as regularization
        x = layers.Dropout(rate=dropouts[0])(x)

        # 2nd and 3rd FC layers do a feature extraction base on a narrow
        # context of convolutional layer
        x = layers.TimeDistributed(layers.Dense(units))(x)
        x = layers.ReLU(max_value=20)(x)
        x = layers.Dropout(rate=dropouts[1])(x)

        x = layers.TimeDistributed(layers.Dense(units))(x)
        x = layers.ReLU(max_value=20)(x)
        x = layers.Dropout(rate=dropouts[2])(x)

        # Use recurrent layer to have a broader context
        x = layers.Bidirectional(layers.LSTM(units, return_sequences=True),
                                 merge_mode='sum')(x)

        # Return at each time step logits along characters. Then CTC
        # computation is more stable, in contrast to the softmax.
        output_tensor = layers.TimeDistributed(layers.Dense(output_dim))(x)
        model = keras.Model(input_tensor, output_tensor, name='DeepSpeech')
        data = np.random.rand(2, 3, input_dim).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                              expected, self.model_files))
Exemple #29
0
 def test_tcn(self):
     """Temporal Convolutional Network: ONNX parity with and without
     return_sequences.

     Fixes vs. the original: the trailing .reshape() to the identical shape
     was a no-op and has been removed, and K.clear_session() is now called
     first for consistency with the other tests in this file.
     """
     from tcn import TCN
     K.clear_session()
     batch_size, timesteps, input_dim = None, 20, 1
     actual_batch_size = 3
     i = Input(batch_shape=(batch_size, timesteps, input_dim))
     np.random.seed(1000)  # set the random seed to avoid output discrepancies
     for return_sequences in [True, False]:
         o = TCN(return_sequences=return_sequences)(i)  # The TCN layers are here.
         o = Dense(1)(o)
         model = keras.models.Model(inputs=[i], outputs=[o])
         onnx_model = keras2onnx.convert_keras(model, model.name)
         # rand already yields shape (actual_batch_size, timesteps, input_dim).
         data = np.random.rand(actual_batch_size, timesteps,
                               input_dim).astype(np.float32)
         expected = model.predict(data)
         self.assertTrue(
             run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data,
                               expected, self.model_files))
Exemple #30
0
    def test_Multi_Channel_CNN(self):
        """Three-branch (multi-kernel-size) CNN text model: ONNX parity.

        Builds one Conv1D branch per kernel size (4, 6, 8), merges the
        flattened branches, and classifies with a 30-way softmax head.
        """
        K.clear_session()
        embedding_size = 100
        cnn_filter_size = 32
        length = 20
        vocab_size = 50

        inputs = []
        branches = []
        # One embedding + convolution + pooling branch per kernel size.
        for kernel_size in (4, 6, 8):
            branch_input = Input(shape=(length, ))
            embedded = Embedding(vocab_size, embedding_size)(branch_input)
            convolved = Conv1D(filters=cnn_filter_size,
                               kernel_size=kernel_size,
                               activation='relu')(embedded)
            dropped = Dropout(0.5)(convolved)
            pooled = MaxPooling1D(pool_size=2)(dropped)
            inputs.append(branch_input)
            branches.append(Flatten()(pooled))

        merged = concatenate(branches)
        # interpretation
        dense1 = Dense(10, activation='relu')(merged)

        outputs = Dense(units=30, activation='softmax')(dense1)

        model = Model(inputs=inputs, outputs=outputs)
        batch_size = 2
        feeds = [np.random.rand(batch_size, length).astype(np.float32)
                 for _ in range(3)]
        expected = model.predict(feeds)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                              feeds, expected, self.model_files))