Code Example #1
    def test_tflite_inference(self, feature_inputs):
        # 'woutfrontend' expects precomputed features; 'wfrontend' embeds the
        # audio frontend and takes raw samples directly.
        test_dir = 'non_semantic_speech_benchmark/data_prep/testdata'
        if feature_inputs:
            test_file = 'model1_woutfrontend.tflite'
        else:
            test_file = 'model1_wfrontend.tflite'
        tflite_model_path = os.path.join(absltest.get_default_test_srcdir(),
                                         test_dir, test_file)
        output_key = '0'
        interpreter = audio_to_embeddings_beam_utils._build_tflite_interpreter(
            tflite_model_path=tflite_model_path)

        # Two seconds of silence at 16 kHz as dummy input.
        model_input = np.zeros([32000], dtype=np.float32)
        sample_rate = 16000
        if feature_inputs:
            # Convert raw samples to the feature representation expected by
            # the frontend-less model.
            model_input = audio_to_embeddings_beam_utils._default_feature_fn(
                model_input, sample_rate)

        audio_to_embeddings_beam_utils._samples_to_embedding_tflite(
            model_input, sample_rate, interpreter, output_key)
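
The method above is shown without its module-level setup. A minimal sketch of the context it assumes follows; the import path, the test-class wrapper, and the parameterized values for feature_inputs are assumptions for illustration, not part of the original example.

# Module-level context assumed by Code Example #1 (and #2). The import path,
# the test-class wrapper, and the parameterization are assumptions.
import os

import numpy as np
from absl.testing import absltest
from absl.testing import parameterized

from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils


class TfliteInferenceTest(parameterized.TestCase):

    @parameterized.parameters(True, False)  # hypothetical values for feature_inputs
    def test_tflite_inference(self, feature_inputs):
        ...  # body as in Code Example #1 above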
Code Example #2
    def test_tflite_inference(self, feature_inputs):
        # 'woutfrontend' expects precomputed features; 'wfrontend' embeds the
        # audio frontend and takes raw samples directly.
        if feature_inputs:
            test_file = 'model1_woutfrontend.tflite'
        else:
            test_file = 'model1_wfrontend.tflite'
        tflite_model_path = os.path.join(absltest.get_default_test_srcdir(),
                                         TEST_DIR, test_file)
        output_key = '0'
        interpreter = audio_to_embeddings_beam_utils.build_tflite_interpreter(
            tflite_model_path=tflite_model_path)

        # Two seconds of silence at 16 kHz as dummy input.
        model_input = np.zeros([32000], dtype=np.float32)
        sample_rate = 16000
        if feature_inputs:
            # Convert raw samples to the feature representation expected by
            # the frontend-less model.
            model_input = audio_to_embeddings_beam_utils._default_feature_fn(
                model_input, sample_rate)
        else:
            # Raw samples need an explicit batch dimension.
            model_input = np.expand_dims(model_input, axis=0)

        audio_to_embeddings_beam_utils.samples_to_embedding_tflite(
            model_input, sample_rate, interpreter, output_key, 'name')
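
Code Example #2 references TEST_DIR, a module-level constant not shown in the snippet (presumably the same testdata path that Example #1 spells out inline), and calls the public helper names rather than the underscore-prefixed ones. A minimal standalone sketch of that public API follows; only the function names and argument order come from the snippet above, while the model path and the all-zero samples are hypothetical placeholders.

# Standalone usage sketch of the public helpers shown in Code Example #2.
# The model path and dummy samples below are hypothetical placeholders.
import numpy as np

from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils

tflite_model_path = '/path/to/model1_wfrontend.tflite'  # hypothetical path
interpreter = audio_to_embeddings_beam_utils.build_tflite_interpreter(
    tflite_model_path=tflite_model_path)

sample_rate = 16000
samples = np.zeros([2 * sample_rate], dtype=np.float32)  # two seconds of silence
samples = np.expand_dims(samples, axis=0)  # add batch dimension, as in the snippet

embedding = audio_to_embeddings_beam_utils.samples_to_embedding_tflite(
    samples, sample_rate, interpreter, '0', 'name')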