Example #1
0
def main():
    """Decode an audio input, run it through an Edge TPU model, and print
    the inference latency (twice, via both APIs) and the raw results.

    Command-line arguments (from build_argparser): --input audio file,
    --model path to the compiled model.
    """
    log.basicConfig(format=" [ %(levelname)s] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)

    args = build_argparser().parse_args()

    # Decode the audio file into the flat sample array the engine expects.
    # NOTE(review): exact format/dtype depends on sound_decode — confirm there.
    voice = sound_decode(args.input)

    infer_engine = BasicEngine(args.model)
    latency, results = infer_engine.RunInference(voice)
    # Latency returned by RunInference should match the engine's own
    # get_inference_time() debug value; both are printed for comparison.
    print(latency)
    print(infer_engine.get_inference_time())
    print(results)
Example #2
0
 def test_run_inference(self):
     """For every test model, run inference on random data and verify the
     result is self-consistent: output size matches the engine's reported
     size, the returned latency agrees with get_inference_time(), and the
     returned values agree element-wise with get_raw_output()."""
     for model in test_utils.get_model_list():
         print('Testing model :', model)
         engine = BasicEngine(test_utils.test_data_path(model))
         random_input = test_utils.generate_random_input(
             1, engine.required_input_array_size())
         inference_latency, output = engine.run_inference(random_input)
         self.assertEqual(output.size, engine.total_output_array_size())
         # Check debugging functions.
         self.assertLess(
             math.fabs(engine.get_inference_time() - inference_latency),
             0.001)
         raw = engine.get_raw_output()
         self.assertEqual(output.size, raw.size)
         for idx in range(output.size):
             # Two NaNs compare unequal, so treat a matched NaN pair as equal.
             if math.isnan(output[idx]) and math.isnan(raw[idx]):
                 continue
             self.assertLess(math.fabs(output[idx] - raw[idx]), 0.001)