Example No. 1
def thread_infer(thread_id, graph_name, engine, \
                 input_path, loops, compare_path, status):
    """ Do inference of a model in a thread.

  Args:
    thread_id: ID of the thread.
    graph_name: Name of the model or graph.
    engine: An sail.Engine instance.
    input_path: Path to input image.
    loops: Number of loops to run
    compare_path: Path to correct result file
    status: Status of comparison

  Returns:
    None.
  """
    input_name = engine.get_input_names(graph_name)[0]
    input_shape = engine.get_input_shape(graph_name, input_name)
    output_name = engine.get_output_names(graph_name)[0]
    output_shape = engine.get_output_shape(graph_name, output_name)
    in_dtype = engine.get_input_dtype(graph_name, input_name)
    out_dtype = engine.get_output_dtype(graph_name, output_name)
    # get handle to create input and output tensors
    handle = engine.get_handle()
    input = sail.Tensor(handle, input_shape, in_dtype, True, True)
    output = sail.Tensor(handle, output_shape, out_dtype, True, True)
    input_tensors = {input_name: input}
    output_tensors = {output_name: output}
    # set io_mode
    engine.set_io_mode(graph_name, sail.SYSIO)
    reference = get_reference(compare_path)
    compare_type = 'fp32_top5' if out_dtype == sail.BM_FLOAT32 else 'int8_top5'
    # pipeline of inference
    for i in range(loops):
        # read image and preprocess
        image = preprocess(input_path).astype(np.float32)
        # scale input data if input data type is int8 or uint8
        if in_dtype == sail.BM_FLOAT32:
            input_tensors[input_name].update_data(image)
        else:
            scale = engine.get_input_scale(graph_name, input_name)
            input_tensors[input_name].scale_from(image, scale)
        # inference
        engine.process(graph_name, input_tensors, output_tensors)
        # scale output data if output data type is int8 or uint8
        if out_dtype == sail.BM_FLOAT32:
            output_data = output.asnumpy()
        else:
            scale = engine.get_output_scale(graph_name, output_name)
            output_data = output.scale_to(scale)
        # postprocess
        result = postprocess(output_data)
        # print result
        print("Top 5 for {} of loop {} in thread {} on tpu {}: {}".format(\
            graph_name, i, thread_id, engine.get_device_id(), \
            result[1]['top5_idx'][0]))
        if not compare(reference, result[1]['top5_idx'][0], compare_type):
            status[thread_id] = False
            return
    status[thread_id] = True
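
For context, a minimal driver sketch for this worker (an assumption, not part of the original example: it presumes `sail`, `thread_infer`, and its helper functions are defined as in the code above, and `run_threads` is a hypothetical name):

# Minimal driver sketch (assumed): launch several worker threads sharing
# one sail.Engine and collect a pass/fail flag per thread.
import threading

def run_threads(bmodel_path, input_path, loops, compare_path, tpu_id, thread_num=2):
    engine = sail.Engine(tpu_id)                # init engine on the chosen TPU
    engine.load(bmodel_path)                    # load the bmodel
    graph_name = engine.get_graph_names()[0]    # only one graph is loaded
    status = {}                                 # thread_id -> True/False
    threads = [threading.Thread(target=thread_infer,
                                args=(tid, graph_name, engine, input_path,
                                      loops, compare_path, status))
               for tid in range(thread_num)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return all(status.get(tid, False) for tid in range(thread_num))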
Example No. 2
    def __init__(self, bmodel_path, tpu_id):
        # init Engine
        Net.engine_ = sail.Engine(tpu_id)
        # load bmodel without builtin input and output tensors
        Net.engine_.load(bmodel_path)
        # get model info
        # only one model loaded for this engine
        # only one input tensor and only one output tensor in this graph
        Net.handle_ = Net.engine_.get_handle()
        Net.graph_name_ = Net.engine_.get_graph_names()[0]
        input_names = Net.engine_.get_input_names(Net.graph_name_)
        input_dtype = 0
        Net.tpu_id_ = tpu_id
        Net.input_name_ = input_names[0]
        for i in range(len(input_names)):
            Net.input_shapes_[input_names[i]] = Net.engine_.get_input_shape(
                Net.graph_name_, input_names[i])
            input_dtype = Net.engine_.get_input_dtype(Net.graph_name_,
                                                      input_names[i])
            input = sail.Tensor(Net.handle_, Net.input_shapes_[input_names[i]],
                                input_dtype, False, False)
            Net.input_tensors_[input_names[i]] = input
            Net.input_dtype_ = input_dtype
        Net.output_names_ = Net.engine_.get_output_names(Net.graph_name_)
        for i in range(len(Net.output_names_)):
            Net.output_shapes_[
                Net.output_names_[i]] = Net.engine_.get_output_shape(
                    Net.graph_name_, Net.output_names_[i])
            output_dtype = Net.engine_.get_output_dtype(
                Net.graph_name_, Net.output_names_[i])
            print(Net.output_shapes_[Net.output_names_[i]])
            output = sail.Tensor(Net.handle_,
                                 Net.output_shapes_[Net.output_names_[i]],
                                 output_dtype, True, True)
            Net.output_tensors_[Net.output_names_[i]] = output
            for j in range(4):
                Net.output_shapes_array_.append(
                    Net.output_shapes_[Net.output_names_[i]][j])
        print(Net.input_shapes_)
        print(Net.output_shapes_)

        # set io_mode
        Net.engine_.set_io_mode(Net.graph_name_, sail.IOMode.SYSIO)
        Net.bmcv_ = sail.Bmcv(Net.handle_)
        Net.img_dtype_ = Net.bmcv_.get_bm_image_data_format(input_dtype)
        scale = Net.engine_.get_input_scale(Net.graph_name_, input_names[0])
        # fold the 1/255 normalization factor into the input scale (0.003922 ≈ 1/255)
        scale *= 0.003922
        Net.preprocessor_ = PreProcessor(Net.bmcv_, scale)

        # load postprocess so
        ll = ctypes.cdll.LoadLibrary
        Net.lib_post_process_ = ll('./libYoloPostProcess.so')

        if not os.path.exists('result_imgs'):
            os.makedirs('result_imgs')
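
Note that this constructor only assigns to class-level containers such as Net.input_shapes_ and Net.output_tensors_; the class body itself is not shown in these examples. A minimal sketch of the attributes it assumes (names inferred from the usage above; the original declarations may differ):

class Net(object):
    # Class-level state assumed by the constructor above (sketch only).
    engine_ = None              # sail.Engine
    handle_ = None              # handle obtained from the engine
    bmcv_ = None                # sail.Bmcv bound to the handle
    graph_name_ = ''
    tpu_id_ = 0
    input_name_ = ''
    input_dtype_ = None
    img_dtype_ = None
    input_shapes_ = {}          # input name  -> shape list
    input_tensors_ = {}         # input name  -> sail.Tensor
    output_names_ = []
    output_shapes_ = {}         # output name -> shape list
    output_tensors_ = {}        # output name -> sail.Tensor
    output_shapes_array_ = []   # flattened output dims for the C post-process
    preprocessor_ = None
    lib_post_process_ = None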
Example No. 3
    def __init__(self, bmodel_path, tpu_id, stage):
        # init Engine
        Net.engine_ = sail.Engine(tpu_id)
        # load bmodel without builtin input and output tensors
        Net.engine_.load(bmodel_path)
        # get model info
        # only one model loaded for this engine
        # only one input tensor and only one output tensor in this graph
        Net.handle_ = Net.engine_.get_handle()
        Net.graph_name_ = Net.engine_.get_graph_names()[0]
        input_names = Net.engine_.get_input_names(Net.graph_name_)
        print("input names:", input_names)
        input_dtype = 0
        Net.tpu_id_ = tpu_id
        Net.input_name_ = input_names[0]
        for i in range(len(input_names)):
            Net.input_shapes_[input_names[i]] = Net.engine_.get_input_shape(
                Net.graph_name_, input_names[i])
            input_dtype = Net.engine_.get_input_dtype(Net.graph_name_,
                                                      input_names[i])
            input = sail.Tensor(Net.handle_, Net.input_shapes_[input_names[i]],
                                input_dtype, False, False)
            Net.input_tensors_[input_names[i]] = input
            Net.input_dtype_ = input_dtype
        Net.output_names_ = Net.engine_.get_output_names(Net.graph_name_)
        for i in range(len(Net.output_names_)):
            Net.output_shapes_[
                Net.output_names_[i]] = Net.engine_.get_output_shape(
                    Net.graph_name_, Net.output_names_[i])
            output_dtype = Net.engine_.get_output_dtype(
                Net.graph_name_, Net.output_names_[i])
            output = sail.Tensor(Net.handle_,
                                 Net.output_shapes_[Net.output_names_[i]],
                                 output_dtype, True, True)
            Net.output_tensors_[Net.output_names_[i]] = output
        print("input shapes:", Net.input_shapes_)
        print("output shapes:", Net.output_shapes_)

        # set io_mode
        Net.engine_.set_io_mode(Net.graph_name_, sail.IOMode.SYSIO)
        Net.bmcv_ = sail.Bmcv(Net.handle_)
        Net.img_dtype_ = Net.bmcv_.get_bm_image_data_format(input_dtype)
        scale = Net.engine_.get_input_scale(Net.graph_name_, input_names[0])
        print("scale", scale)
        scale *= 0.003922
        Net.preprocessor_ = PreProcessor(Net.bmcv_, scale)

        # load postprocess so
        ll = ctypes.cdll.LoadLibrary
        Net.lib_post_process_ = ll('./post_process_lib/libPostProcess.so')
        Net.lib_post_process_.post_process_hello()
Example No. 4
  def __init__(self, bmodel_path, tpu_id):
    # init Engine
    Net.engine_ = sail.Engine(tpu_id)
    # load bmodel without builtin input and output tensors
    Net.engine_.load(bmodel_path)
    # get model info
    # only one model loaded for this engine
    # only one input tensor and only one output tensor in this graph
    Net.handle_ = Net.engine_.get_handle()
    Net.graph_name_ = Net.engine_.get_graph_names()[0]
    Net.input_names_ = Net.engine_.get_input_names(Net.graph_name_)
    input_dtype = 0
    Net.tpu_id_ = tpu_id
    for i in range(len(Net.input_names_)): 
      Net.input_shapes_[Net.input_names_[i]] = Net.engine_.get_input_shape(Net.graph_name_, Net.input_names_[i])
      input_dtype = Net.engine_.get_input_dtype(Net.graph_name_, Net.input_names_[i])
      # only the second input tensor (i == 1) allocates its own system and device memory
      alloc_flag = (i == 1)
      input = sail.Tensor(Net.handle_, Net.input_shapes_[Net.input_names_[i]], input_dtype, alloc_flag, alloc_flag)
      Net.input_tensors_[Net.input_names_[i]] = input
      Net.input_dtype_ = input_dtype
    Net.output_names_ = Net.engine_.get_output_names(Net.graph_name_)
    for i in range(len(Net.output_names_)): 
      Net.output_shapes_[Net.output_names_[i]] = Net.engine_.get_output_shape(Net.graph_name_, Net.output_names_[i])
      output_dtype = Net.engine_.get_output_dtype(Net.graph_name_, Net.output_names_[i])
      output = sail.Tensor(Net.handle_, Net.output_shapes_[Net.output_names_[i]], output_dtype, True, True)
      Net.output_tensors_[Net.output_names_[i]] = output
    print(Net.input_shapes_)
    print(Net.output_shapes_)

    # set io_mode
    Net.engine_.set_io_mode(Net.graph_name_, sail.IOMode.SYSO)
    Net.bmcv_ = sail.Bmcv(Net.handle_)
    Net.img_dtype_ = Net.bmcv_.get_bm_image_data_format(input_dtype)
    scale = Net.engine_.get_input_scale(Net.graph_name_, Net.input_names_[0])
    scale *= 0.003922
    Net.preprocessor_ = PreProcessor(Net.bmcv_, scale)

    if not os.path.exists('result_imgs'):
      os.makedirs('result_imgs')
Example No. 5
    def dis_image(img):
        # copy the frame into a BGR planar image of the same size
        dis_img = sail.BMImage(Net.handle_, img.height(), img.width(),
                               sail.Format.FORMAT_BGR_PLANAR, img.dtype())
        Net.bmcv_.vpp_resize(img, dis_img, img.width(), img.height())
        # wrap the image in a tensor so its data can be copied to system memory
        t_img_tensor = sail.Tensor(
            Net.handle_, [1, 3, img.height(), img.width()],
            sail.Dtype.BM_UINT8, True, False)
        Net.bmcv_.bm_image_to_tensor(dis_img, t_img_tensor)
        # sync device data to system memory before reading it as numpy
        t_img_tensor.sync_d2s()
        np_t_img_tensor = t_img_tensor.asnumpy()
        # NCHW -> HWC for OpenCV display
        np_t_img_tensor = np_t_img_tensor.transpose((0, 2, 3, 1))
        np_t_img_tensor = np_t_img_tensor.reshape(
            [img.height(), img.width(), 3])
        cv2.imshow('det_result', np.uint8(np_t_img_tensor))
        cv2.waitKey(10)
Example No. 6
    def detect(self, video_path):
        print("video_path=", video_path)
        # open a video to be decoded
        decoder = sail.Decoder(video_path, True, Net.tpu_id_)
        frame_id = 0
        cap = cv2.VideoCapture(video_path)
        while True:
            img = sail.BMImage()
            # decode a frame from video
            ret = decoder.read(Net.handle_, img)
            if ret != 0:
                print("Finished to read the video!")
                return

            # preprocess image for inference
            img_resized = sail.BMImage(Net.handle_,
                                       Net.input_shapes_[Net.input_name_][2],
                                       Net.input_shapes_[Net.input_name_][3],
                                       sail.Format.FORMAT_BGR_PLANAR,
                                       img.dtype())
            img_processed = sail.BMImage(Net.handle_,
                                         Net.input_shapes_[Net.input_name_][2],
                                         Net.input_shapes_[Net.input_name_][3],
                                         sail.Format.FORMAT_BGR_PLANAR,
                                         Net.img_dtype_)
            # resize origin image
            Net.preprocessor_.process(img, img_resized,
                                      Net.input_shapes_[Net.input_name_][2],
                                      Net.input_shapes_[Net.input_name_][3])

            # per-channel linear transform (a * x + b) via convert_to
            Net.bmcv_.convert_to(img_resized, img_processed, ((Net.preprocessor_.ab[0], Net.preprocessor_.ab[1]), \
                                                                 (Net.preprocessor_.ab[2], Net.preprocessor_.ab[3]), \
                                                                 (Net.preprocessor_.ab[4], Net.preprocessor_.ab[5])))
            Net.bmcv_.bm_image_to_tensor(img_processed,
                                         Net.input_tensors_[Net.input_name_])
            # do inference
            #print(Net.input_shapes_)
            Net.engine_.process(Net.graph_name_, Net.input_tensors_,
                                Net.input_shapes_, Net.output_tensors_)
            #out_data = Net.output_tensors_[Net.output_names_[0]].pysys_data()
            #print(out_data)

            # post process, nms
            CLONG_P_INPUT = len(Net.output_tensors_) * ctypes.c_long
            Net.post_process_inputs_ = CLONG_P_INPUT()
            for i in range(len(Net.output_tensors_)):
                output_data = Net.output_tensors_[
                    Net.output_names_[i]].pysys_data()
                Net.post_process_inputs_[i] = output_data[0]

            ##############################################
            #show image
            dis_img = sail.BMImage(Net.handle_, img.height(), img.width(),
                                   sail.Format.FORMAT_BGR_PLANAR, img.dtype())
            Net.bmcv_.vpp_resize(img, dis_img, img.width(), img.height())
            t_img_tensor = sail.Tensor(
                Net.handle_,
                [1, 3, img.height(), img.width()], sail.Dtype.BM_UINT8, True,
                False)
            Net.bmcv_.bm_image_to_tensor(dis_img, t_img_tensor)
            t_img_tensor.sync_d2s()
            np_t_img_tensor = t_img_tensor.asnumpy()
            np_t_img_tensor = np_t_img_tensor.transpose((0, 2, 3, 1))
            np_t_img_tensor = np_t_img_tensor.reshape(
                [img.height(), img.width(), 3])
            ori_img = np.uint8(np_t_img_tensor)
            ori_img = ori_img.ctypes.data_as(ctypes.c_char_p)
            ##############################################

            #print( Net.output_shapes_)
            pointnum = Net.lib_post_process_.post_process(Net.post_process_inputs_, 1,img.width(), img.height(), \
                    Net.input_shapes_[Net.input_name_][3], \
                    Net.input_shapes_[Net.input_name_][2], \
                    Net.output_shapes_[Net.output_names_[0]][3], \
                    Net.output_shapes_[Net.output_names_[0]][2], \
                    len(Net.output_tensors_), \
                    Net.output_shapes_[Net.output_names_[0]][3] * Net.output_shapes_[Net.output_names_[0]][2] * Net.output_shapes_[Net.output_names_[0]][1], \
                    ori_img)
            frame_id += 1
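
For reference, a minimal sketch of how this detector might be driven (an assumption: it uses the Net constructor from Example No. 2 above, and the paths are placeholders):

# Minimal driver sketch (assumed, not part of the original example).
if __name__ == '__main__':
    bmodel_path = './compilation.bmodel'   # placeholder bmodel path
    video_path = './test.mp4'              # placeholder input video
    tpu_id = 0
    net = Net(bmodel_path, tpu_id)
    net.detect(video_path)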
Example No. 7
def inference(bmodel_path, input_path, loops, tpu_id, compare_path):
    """ Load a bmodel and do inference.
  Args:
   bmodel_path: Path to bmodel
   input_path: Path to input file
   loops: Number of loops to run
   tpu_id: ID of TPU to use
   compare_path: Path to correct result file

  Returns:
    True for success and False for failure
  """
    # init Engine
    engine = sail.Engine(tpu_id)
    # load bmodel without builtin input and output tensors
    engine.load(bmodel_path)
    # get model info
    # only one model loaded for this engine
    # only one input tensor and only one output tensor in this graph
    graph_name = engine.get_graph_names()[0]
    input_name = engine.get_input_names(graph_name)[0]
    output_name = engine.get_output_names(graph_name)[0]
    # static input shape (batch of 4 frames) and maximum output shape used to
    # allocate the I/O tensors; the real output shape is queried after inference
    input_shape = [4, 3, 300, 300]
    input_shapes = {input_name: input_shape}
    output_shape = [1, 1, 800, 7]
    input_dtype = engine.get_input_dtype(graph_name, input_name)
    output_dtype = engine.get_output_dtype(graph_name, output_name)
    is_fp32 = (input_dtype == sail.Dtype.BM_FLOAT32)
    # get handle to create input and output tensors
    handle = engine.get_handle()
    input = sail.Tensor(handle, input_shape, input_dtype, False, False)
    output = sail.Tensor(handle, output_shape, output_dtype, True, True)
    input_tensors = {input_name: input}
    output_tensors = {output_name: output}
    # set io_mode
    engine.set_io_mode(graph_name, sail.IOMode.SYSO)
    # init bmcv for preprocess
    bmcv = sail.Bmcv(handle)
    img_dtype = bmcv.get_bm_image_data_format(input_dtype)
    # init preprocessor and postprocessor
    scale = engine.get_input_scale(graph_name, input_name)
    preprocessor = PreProcessor(bmcv, scale)
    threshold = 0.59 if is_fp32 else 0.52
    postprocessor = PostProcess(threshold)
    reference = postprocessor.get_reference(compare_path)
    # init decoder
    decoder = sail.Decoder(input_path, True, tpu_id)
    status = True
    # pipeline of inference
    for i in range(loops):
        imgs_0 = sail.BMImageArray4D()
        imgs_1 = sail.BMImageArray4D(handle, input_shape[2], input_shape[3], \
                                     sail.Format.FORMAT_BGR_PLANAR, img_dtype)
        # read 4 frames from the input video since the batch size is 4
        flag = False
        for j in range(4):
            ret = decoder.read_(handle, imgs_0[j])
            if ret != 0:
                print("Finished to read the video!")
                flag = True
                break
        if flag:
            break
        # preprocess
        preprocessor.process(imgs_0, imgs_1)
        bmcv.bm_image_to_tensor(imgs_1, input)
        # inference
        engine.process(graph_name, input_tensors, input_shapes, output_tensors)
        # postprocess
        real_output_shape = engine.get_output_shape(graph_name, output_name)
        out = output.asnumpy(real_output_shape)
        dets = postprocessor.process(out, imgs_0[0].width(),
                                     imgs_0[0].height())
        # print result
        if postprocessor.compare(reference, dets, i):
            for j, vals in dets.items():
                frame_id = int(i * 4 + j + 1)
                img0 = sail.BMImage(imgs_0[j])
                for class_id, score, x0, y0, x1, y1 in vals:
                    msg = '[Frame {} on tpu {}] Category: {}, Score: {:.3f},'
                    msg += ' Box: [{}, {}, {}, {}]'
                    print(
                        msg.format(frame_id, tpu_id, class_id, score, x0, y0,
                                   x1, y1))
                    bmcv.rectangle(img0, x0, y0, x1 - x0 + 1, y1 - y0 + 1,
                                   (255, 0, 0), 3)
                bmcv.imwrite('result-{}.jpg'.format(frame_id), img0)
        else:
            status = False
            break
    return status
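
A minimal command-line wrapper for this function might look like the sketch below (the flag names are placeholders, not taken from the original script):

# Minimal CLI sketch (assumed, not part of the original example).
import argparse
import sys

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run bmodel inference on a video.')
    parser.add_argument('--bmodel', required=True, help='path to the bmodel')
    parser.add_argument('--input', required=True, help='path to the input video')
    parser.add_argument('--loops', type=int, default=1, help='number of loops to run')
    parser.add_argument('--tpu_id', type=int, default=0, help='TPU id to use')
    parser.add_argument('--compare', default='', help='path to the reference result file')
    args = parser.parse_args()
    ok = inference(args.bmodel, args.input, args.loops, args.tpu_id, args.compare)
    sys.exit(0 if ok else 1)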