Example 1
import numpy as np
import sophon.sail as sail  # import path assumed; the excerpt refers to it as `sail`

# preprocess, postprocess, get_reference and compare are helper functions
# defined elsewhere in the original sample.

def thread_infer(thread_id, graph_name, engine,
                 input_path, loops, compare_path, status):
    """Do inference of a model in a thread.

    Args:
        thread_id: ID of the thread.
        graph_name: Name of the model or graph.
        engine: A sail.Engine instance.
        input_path: Path to the input image.
        loops: Number of loops to run.
        compare_path: Path to the file holding the correct result.
        status: List recording the comparison status of each thread.

    Returns:
        None.
    """
    input_name = engine.get_input_names(graph_name)[0]
    input_shape = engine.get_input_shape(graph_name, input_name)
    output_name = engine.get_output_names(graph_name)[0]
    output_shape = engine.get_output_shape(graph_name, output_name)
    in_dtype = engine.get_input_dtype(graph_name, input_name)
    out_dtype = engine.get_output_dtype(graph_name, output_name)
    # get handle to create input and output tensors
    handle = engine.get_handle()
    input_tensor = sail.Tensor(handle, input_shape, in_dtype, True, True)
    output_tensor = sail.Tensor(handle, output_shape, out_dtype, True, True)
    input_tensors = {input_name: input_tensor}
    output_tensors = {output_name: output_tensor}
    # set io_mode
    engine.set_io_mode(graph_name, sail.SYSIO)
    reference = get_reference(compare_path)
    compare_type = 'fp32_top5' if out_dtype == sail.BM_FLOAT32 else 'int8_top5'
    # pipeline of inference
    for i in range(loops):
        # read image and preprocess
        image = preprocess(input_path).astype(np.float32)
        # scale input data if input data type is int8 or uint8
        if in_dtype == sail.BM_FLOAT32:
            input_tensors[input_name].update_data(image)
        else:
            scale = engine.get_input_scale(graph_name, input_name)
            input_tensors[input_name].scale_from(image, scale)
        # inference
        engine.process(graph_name, input_tensors, output_tensors)
        # scale output data if output data type is int8 or uint8
        if out_dtype == sail.BM_FLOAT32:
            output_data = output_tensor.asnumpy()
        else:
            scale = engine.get_output_scale(graph_name, output_name)
            output_data = output_tensor.scale_to(scale)
        # postprocess
        result = postprocess(output_data)
        # print result
        print("Top 5 for {} of loop {} in thread {} on tpu {}: {}".format(\
            graph_name, i, thread_id, engine.get_device_id(), \
            result[1]['top5_idx'][0]))
        if not compare(reference, result[1]['top5_idx'][0], compare_type):
            status[thread_id] = False
            return
    status[thread_id] = True
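
A minimal sketch of how thread_infer above might be driven, reusing the sail.Engine constructor shown in Example 4. The bmodel path, image path, reference path, loop count and thread count are placeholders, and sharing one engine across threads is an assumption taken from the function's signature.

import threading

if __name__ == '__main__':
    # Placeholder arguments; substitute real bmodel, image and reference paths.
    engine = sail.Engine('model.bmodel', 0, sail.SYSIO)
    graph_name = engine.get_graph_names()[0]
    thread_num = 2
    status = [False] * thread_num
    threads = [threading.Thread(target=thread_infer,
                                args=(tid, graph_name, engine, 'test.jpg',
                                      10, 'reference.txt', status))
               for tid in range(thread_num)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('All threads passed.' if all(status) else 'Comparison failed.')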
Example 2
    def detection(self, signal):
        signal = preprocess(signal, fs=self.fs)
        signal_tensor = torch.from_numpy(signal).view(1, 1, -1).float().to(self.device)

        if self.model_type == 'fcn':
            outputs = self.model(signal_tensor)  # outputs: 1 x 1 x beats
            raw_locs = list(outputs.squeeze().detach().cpu().numpy())
            beat_locs = postprocess(raw_locs, margin=12)
        else:
            outputs = self.model.sample(signal_tensor)  # outputs: 1 x 1 x beats
            if outputs is not None:
                # np.int was removed in NumPy 1.24+; use the builtin int instead
                beat_locs = np.rint(outputs.view(-1).detach().cpu().numpy()).astype(int)
            else:
                beat_locs = np.array([])
        return beat_locs
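
The non-FCN branch above rounds the model's real-valued beat positions to integer sample indices. A tiny self-contained illustration of that rounding step, with made-up values:

import numpy as np

raw = np.array([12.4, 118.9, 240.2])  # made-up fractional beat positions
beat_locs = np.rint(raw).astype(int)  # rounds to array([ 12, 119, 240])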
Example 3
def thread_infer(thread_id, engine, input_path, loops, compare_path, status):
    """Do inference of a model in a thread.

    Args:
        thread_id: ID of the thread.
        engine: A sail.Engine instance.
        input_path: Path to the input image file.
        loops: Number of loops to run.
        compare_path: Path to the file holding the correct result.
        status: List recording the comparison status of each thread.

    Returns:
        None.
    """
    # get model info
    # only one model loaded for this engine
    # only one input tensor and only one output tensor in this graph
    graph_name = engine.get_graph_names()[0]
    input_name = engine.get_input_names(graph_name)[0]
    input_shape = engine.get_input_shape(graph_name, input_name)
    output_name = engine.get_output_names(graph_name)[0]
    output_shape = engine.get_output_shape(graph_name, output_name)
    out_dtype = engine.get_output_dtype(graph_name, output_name)
    reference = get_reference(compare_path)
    compare_type = 'fp32_top5' if out_dtype == sail.BM_FLOAT32 else 'int8_top5'
    # pipeline of inference
    for i in range(loops):
        # read image and preprocess
        image = preprocess(input_path).astype(np.float32)
        # inference with fp32 input and output;
        # for int8 models, data scaling (input: fp32 to int8,
        # output: int8 to fp32) is done internally
        output = engine.process(graph_name, {input_name: image})
        # postprocess
        result = postprocess(output[output_name])
        # print result
        print("Top 5 of loop {} in thread {} on tpu {}: {}".format(\
            i, thread_id, engine.get_device_id(), result[1]['top5_idx'][0]))
        if not compare(reference, result[1]['top5_idx'][0], compare_type):
            status[thread_id] = False
            return
    status[thread_id] = True
Example 4
def inference(bmodel_path, input_path, loops, tpu_id, compare_path):
  """ Do inference of a model.

  Args:
    bmodel_path: Path to the bmodel.
    input_path: Path to the input image.
    loops: Number of loops to run.
    tpu_id: ID of the TPU to run on.
    compare_path: Path to the file holding the correct result.

  Returns:
    True for success and False for failure.
  """
  # init Engine to load bmodel and allocate input and output tensors
  engine = sail.Engine(bmodel_path, tpu_id, sail.SYSIO)
  # get model info
  # only one model loaded for this engine
  # only one input tensor and only one output tensor in this graph
  graph_name = engine.get_graph_names()[0]
  input_name = engine.get_input_names(graph_name)[0]
  input_shape = engine.get_input_shape(graph_name, input_name)
  output_name = engine.get_output_names(graph_name)[0]
  output_shape = engine.get_output_shape(graph_name, output_name)
  out_dtype = engine.get_output_dtype(graph_name, output_name)
  reference = get_reference(compare_path)
  compare_type = 'fp32_top5' if out_dtype == sail.BM_FLOAT32 else 'int8_top5'
  # pipeline of inference
  for i in range(loops):
    # read image and preprocess
    image = preprocess(input_path).astype(np.float32)
    # inference with fp32 input and output;
    # for int8 models, data scaling (input: fp32 to int8,
    # output: int8 to fp32) is done internally
    output = engine.process(graph_name, {input_name: image})
    # postprocess
    result = postprocess(output[output_name])
    # print result
    print("Top 5 of loop {} on tpu {}: {}".format(i, tpu_id, \
                                                  result[1]['top5_idx'][0]))
    if not compare(reference, result[1]['top5_idx'][0], compare_type):
      return False
  return True
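
A minimal driver for inference; the argument values below are placeholders rather than paths from the original sample.

if __name__ == '__main__':
  # Placeholder arguments; substitute real bmodel, image and reference paths.
  ok = inference('model.bmodel', 'test.jpg', 10, 0, 'reference.txt')
  print('PASS' if ok else 'FAIL')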
Example 5
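# preprocess, postprocess and InferenceModel come from the surrounding project;
# their imports are not shown in this excerpt.
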
# Parameter configuration
configs = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'ExtremeC3_Portrait_Segmentation',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the data
input_data = preprocess(configs['img_path'])

# Step 2: load the model
model = InferenceModel(
    modelpath=configs['model_name'], 
    use_gpu=configs['use_gpu'], 
    use_mkldnn=configs['use_mkldnn']
)
model.eval()

# Step 3: run inference
output = model(input_data)

# Step 4: postprocess the results
postprocess(
    output, 
    configs['save_dir'],
    configs['img_path'],
    configs['model_name']
)
Example 6
from processor import preprocess, postprocess
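# InferenceModel comes from the surrounding project; its import is not shown
# in this excerpt.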

# Parameter configuration
configs = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'u2net_portrait',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the data
input_data = preprocess(configs['img_path'])

# Step 2: load the model
model = InferenceModel(
    modelpath=configs['model_name'], 
    use_gpu=configs['use_gpu'], 
    use_mkldnn=configs['use_mkldnn']
)
model.eval()

# Step 3: run inference
d0, _, _, _, _, _, _ = model(input_data)  # the model returns seven side outputs; only the first is used

# Step 4: postprocess the results
postprocess(
    d0, 
    configs['save_dir'],
    configs['model_name']
)