    def __init__(self, config):
        super().__init__(config)
        self.trt_engine_path = os.path.expanduser(
            config[config["INFERENCE_ENGINE"]]["TRT_ENGINE_PATH"])
        self.video_path = config[config["INFERENCE_ENGINE"]]["VIDEO_PATH"]
        self.inference_transform = self.generate_transform()
        self.create_engine = config[
            config["INFERENCE_ENGINE"]]["CREATE_ENGINE"]
        self.input_size = config["TRANSFORM"]["RESIZE"]
        self.input_channel = config["DATA_GENERATOR"]["OUTPUT_IMAGE_CHANNELS"]
        self.pb_file_path = ZL_CACHE.fetch(
            config[config["INFERENCE_ENGINE"]]["PB_FILE_PATH"])
        self.input_name = config[config["INFERENCE_ENGINE"]]["INPUT_NAME"]
        self.output_name = config[config["INFERENCE_ENGINE"]]["OUTPUT_NAME"]
        self.num_frames = config[config["INFERENCE_ENGINE"]]["NUM_FRAMES"]
        if self.create_engine:
            model_data = ModelData(
                self.pb_file_path,
                self.input_name,
                (self.input_channel, self.input_size[0], self.input_size[1]),
                self.output_name,
                config[config["INFERENCE_ENGINE"]]["FP16_MODE"],
                self.trt_engine_path,
            )
            self.build_and_dump_engine(model_data)

        self.batch_size = 1
        try:
            self.engine = self.load_engine(self.trt_engine_path)
            self.context = self.engine.create_execution_context()
            self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(
                self.engine)

        except Exception:
            # Avoid a bare `except:`; log the traceback instead of printing.
            logging.exception("Failed to load engine from %s",
                              self.trt_engine_path)
def run_inference(config, freeze_to_pb_path=None):
    inference_factory = InferenceFactory(config)
    inference = inference_factory.create_inference(
        config[config["INFERENCE_ENGINE"]]["INFERENCE_NAME"])

    if freeze_to_pb_path is not None:
        inference.freeze_to_pb(freeze_to_pb_path)
        if FLAGS.upload:
            ZL_CACHE.upload("{}/frozen_model.pb".format(freeze_to_pb_path))

    else:
        output = inference.run_inference()
        if FLAGS.debug:
            output_tf = inference.run_inference_tf()
            np.testing.assert_array_almost_equal(np.array(output).ravel(),
                                                 np.array(output_tf).ravel(),
                                                 decimal=4)
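
A minimal usage sketch for the driver above; `load_config` and the flag handling are hypothetical stand-ins, not part of the original module:

if __name__ == "__main__":
    config = load_config("inference.yaml")  # hypothetical config loader
    # Freeze the model to a .pb and optionally upload it:
    #   run_inference(config, freeze_to_pb_path="/tmp/export")
    # Otherwise run TensorRT inference (cross-checked against TensorFlow
    # when FLAGS.debug is set):
    run_inference(config)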
    def run_inference(self):
        file_list = sorted(directory_to_file_list(self.video_path))
        output = []
        for file_path in file_list:
            for idx, frame in enumerate(stream_video(
                    ZL_CACHE.fetch(file_path))):
                # Cap the number of frames processed per video.
                if idx >= self.num_frames:
                    break
                output.append(self.get_image_pred(frame, True))
        logging.info("================Inference Complete=============")
        return output
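
    # `get_image_pred` is not shown in this excerpt. A plausible sketch, assuming
    # it applies the same transform as `run_inference_tf` below and executes the
    # TensorRT engine via a `do_inference`-style helper (sketched after
    # `run_inference_tf`); the buffer names here are assumptions:
    def get_image_pred(self, frame, preprocess=True):
        if preprocess:
            frame, _ = self.inference_transform.apply_transforms(
                frame, frame[:, :, 1])
        # Copy the frame into the pre-allocated pagelocked input buffer;
        # its size must match the (C, H, W) shape the engine was built with.
        np.copyto(self.inputs[0].host, frame.ravel())
        return do_inference(self.context, self.bindings, self.inputs,
                            self.outputs, self.stream, self.batch_size)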
    def run_inference_tf(self):
        file_list = sorted(directory_to_file_list(self.video_path))
        model = ModelInferTF(self.pb_file_path, self.input_name,
                             self.output_name)
        output = []
        for file_path in file_list:
            for idx, frame in enumerate(stream_video(
                    ZL_CACHE.fetch(file_path))):
                # Cap the number of frames processed per video.
                if idx >= self.num_frames:
                    break
                frame, _ = self.inference_transform.apply_transforms(
                    frame, frame[:, :, 1])
                frame = np.expand_dims(frame, axis=0)
                output.append(model.infer(frame))
                logging.debug("TF prediction: %s", output[-1])
        return output
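
The engine loading and buffer handling in `__init__` follow the pattern of NVIDIA's TensorRT Python samples (HostDeviceMem objects with `.host`/`.device` buffers plus a CUDA stream). A sketch of the two helpers under that assumption; `load_engine` is written here as a free function while the class calls it as a method, and the project's actual `load_engine`/`allocate_buffers` may differ:

import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit  # noqa: F401 -- creates a CUDA context on import

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

def load_engine(trt_engine_path):
    # Deserialize an engine that was previously serialized to disk.
    with open(trt_engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
        return runtime.deserialize_cuda_engine(f.read())

def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Copy host input buffers to the device.
    for inp in inputs:
        cuda.memcpy_htod_async(inp.device, inp.host, stream)
    # Execute on the bound buffers (implicit-batch API, as implied by
    # self.batch_size above).
    context.execute_async(batch_size=batch_size, bindings=bindings,
                          stream_handle=stream.handle)
    # Copy device outputs back to the host, then wait for completion.
    for out in outputs:
        cuda.memcpy_dtoh_async(out.host, out.device, stream)
    stream.synchronize()
    return [out.host for out in outputs]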