Code Example #1
def common_transfer(pb_name, export_name):
    # For referencing specific logs (track the return code).
    ret = 0
    rknn = RKNN()
    # This step is not needed when working with grayscale images.
    # rknn.config(channel_mean_value='', reorder_channel='')
    print('--> Loading model')

    ret = rknn.load_tensorflow(
        tf_pb=pb_name,
        inputs=['x'],
        outputs=['y_conv'],
        input_size_list=[[28, 28, 1]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    rknn.build(do_quantization=False)
    print('done')
    # Export and save the RKNN model file
    rknn.export_rknn(export_name)
    # Release RKNN context
    rknn.release()
    return ret
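A minimal sketch of how this converter might be invoked; the frozen-graph path and output name are placeholders, and the RKNN import is assumed to sit at module level in the original script:

# Hypothetical driver for common_transfer(); all paths are placeholders.
from rknn.api import RKNN

if __name__ == '__main__':
    ret = common_transfer('./mnist_frozen_graph.pb', './mnist.rknn')
    print('conversion finished, ret =', ret)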
Code Example #2
def rknn_convert(input_model, output_model, dataset_file, target_platform):
    # Create RKNN object
    rknn = RKNN()
    print('--> config model')
    rknn.config(channel_mean_value='127.5 127.5 127.5 127.5', reorder_channel='0 1 2', batch_size=1, target_platform=target_platform)

    # Load onnx model
    print('--> Loading model')
    ret = rknn.load_onnx(model=input_model)
    if ret != 0:
        print('Load failed!')
        exit(ret)

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=dataset_file, pre_compile=True)
    if ret != 0:
        print('Build failed!')
        exit(ret)

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn(output_model)
    if ret != 0:
        print('Export .rknn failed!')
        exit(ret)

    # Release RKNN object
    rknn.release()
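Because do_quantization=True is used, the dataset_file argument points at a plain-text list of calibration samples, one path per line. A minimal, hypothetical sketch of preparing such a file and calling the converter (file names and platform are placeholders):

# Hypothetical usage of rknn_convert(); all paths are placeholders.
with open('dataset.txt', 'w') as f:
    f.write('./calib/img_0001.jpg\n')
    f.write('./calib/img_0002.jpg\n')

rknn_convert(input_model='model.onnx',
             output_model='model.rknn',
             dataset_file='dataset.txt',
             target_platform='rk1808')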
Code Example #3
def transfer(pb_path, rknn_name):
    # Create RKNN object
    rknn = RKNN()

    # Config for model input pre-processing
    rknn.config()

    # Load TensorFlow model
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['Reshape'],
                               outputs=['probability'],
                               input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load model failed')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    rknn.build(do_quantization=False)
    print('done')

    # Init runtime environment (only needed to run inference before export;
    # it must come after the model has been loaded and built)
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Export RKNN model
    rknn.export_rknn(rknn_name)

    # Release RKNN context
    rknn.release()
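INPUT_WIDTH and INPUT_HEIGHT are module-level constants that are not part of this snippet; a hypothetical call with placeholder values:

# Hypothetical usage; the real constants and paths live in the original script.
INPUT_WIDTH = 28
INPUT_HEIGHT = 28

transfer('./frozen_graph.pb', './model.rknn')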
Code Example #4
def test_rknn():
    from rknn.api import RKNN
    import cv2  # used by cv2.imread below
    rknn = RKNN()

    # Load rknn model
    print('--> Load RKNN model')
    ret = rknn.load_rknn('lprnet.rknn')
    if ret != 0:
        print('Load RKNN model failed!')
        exit(ret)

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime(target='rk1808')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')
    image = cv2.imread('data/eval/000256.png')
    outputs = rknn.inference(inputs=[image])
    preds = outputs[0]
    labels, pred_labels = decode(preds, CHARS)
    print(labels)
    print('done')

    rknn.release()
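The decode() helper and the CHARS alphabet come from the surrounding module and are not shown here. For LPRNet-style outputs a greedy CTC decode is typical; the sketch below is a hypothetical stand-in that assumes the squeezed output has shape (num_classes, sequence_length) with the last class acting as the CTC blank:

# Hypothetical greedy CTC decoder; the axis layout and blank index are assumptions.
import numpy as np

def decode(preds, chars):
    probs = np.squeeze(np.asarray(preds))    # assumed (num_classes, seq_len) after squeeze
    best = np.argmax(probs, axis=0)          # best class index at each time step
    blank = probs.shape[0] - 1               # assume the last class is the CTC blank
    out, prev = [], blank
    for idx in best:
        if idx != blank and idx != prev:     # collapse repeats, drop blanks
            out.append(chars[idx])
        prev = idx
    return ''.join(out), best.tolist()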
Code Example #5
def main():
    
    minsize = 20
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709
    pnet_rknn_list=init_pnet()
    rnet_rknn = RKNN()
    onet_rknn = RKNN()
    rnet_rknn.load_rknn('./RNet.rknn')
    onet_rknn.load_rknn('./ONet.rknn')
    ret = rnet_rknn.init_runtime()
    if ret != 0:
        print('Init rnet runtime environment failed')
        exit(ret)
    ret = onet_rknn.init_runtime()
    if ret != 0:
        print('Init onet runtime environment failed')
        exit(ret)
    #error = []
    #f = open(imglistfile, 'r')
    sys.stdout = open('/dev/stdout', 'w')
    sys.stderr = open('/dev/stderr', 'w')
    capture = cv2.VideoCapture(0)
    while (True):
        tic()
        ret, img = capture.read()
        if ret == True:
            #img = cv2.imread('./test4.jpg')
            img = cv2.resize(img,(450,344))
            img_matlab = img.copy()
            img_matlab = cv2.cvtColor(img_matlab, cv2.COLOR_BGR2RGB)
            toc()
            print("capture--------------------")
            #img_matlab = img.copy()
            #tmp = img_matlab[:,:,2].copy()
            #img_matlab[:,:,2] = img_matlab[:,:,0]
            #img_matlab[:,:,0] = tmp
            tic()
            # check rgb position
            boundingboxes, points = detect_face(img_matlab, minsize, pnet_rknn_list, rnet_rknn, onet_rknn, threshold, False, factor)
            toc()
            tic()
            img = drawBoxes(img, boundingboxes)
            
            cv2.imshow('img', img)
            c = cv2.waitKey(5) & 0xff
            if c==27:
                break
            toc()
            print("imshow--------------------")
            #if boundingboxes.shape[0] > 0:
            #    error.append[imgpath]
            #print(error)
    for i in range(9):
        pnet_rknn_list[i].release()
    rnet_rknn.release()
    onet_rknn.release()
    cv2.destroyAllWindows()                
    capture.release()  
Code Example #6
def quantify_transfer(pb_name, dataset_name, export_name):
    ret = 0
    print(pb_name, dataset_name, export_name)
    rknn = RKNN()
    rknn.config(channel_mean_value='',
                reorder_channel='',
                quantized_dtype='dynamic_fixed_point-8')
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_name,
                               inputs=['test/x'],
                               outputs=['test/hypothesis'],
                               input_size_list=[[1, 4]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    rknn.build(do_quantization=True, dataset=dataset_name)
    print('done')
    # Export and save the RKNN model file
    rknn.export_rknn(export_name)
    # Release RKNN Context
    rknn.release()
    return ret
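This graph takes a 1x4 numeric tensor rather than an image, so the calibration entries in the dataset file would plausibly be .npy samples instead of image paths (an assumption; check the dataset format supported by your toolkit version). A hypothetical call:

# Hypothetical usage; the sample file, graph, and node names are placeholders.
import numpy as np

np.save('sample_0.npy', np.random.rand(1, 4).astype(np.float32))
with open('dataset.txt', 'w') as f:
    f.write('sample_0.npy\n')

quantify_transfer('./logistic.pb', './dataset.txt', './logistic.rknn')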
Code Example #7
def quanlization_darknet_model(model_path,
                               weight_path,
                               dataset_txt,
                               is_quantization=True,
                               pre_compile=False):
    # Create the RKNN execution object
    rknn = RKNN(verbose=True, verbose_file='verbose.log')
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=4)
    flag = rknn.load_darknet(model=model_path, weight=weight_path)
    # Nothing below needs to be modified
    if flag == 0:
        print('load_darknet success')
    else:
        print('load_darknet failure')
    print('done')
    # Build (parse) the model
    # do_quantization=False means no quantization is performed
    # Quantization shrinks the model and speeds up inference, but costs some precision
    print('--> Building model')
    print(os.getcwd())
    # flag = rknn.build(do_quantization=False)
    if is_quantization:
        flag = rknn.build(do_quantization=True,
                          dataset=dataset_txt,
                          pre_compile=pre_compile)
        print('do quantization')
        # Path for the exported RKNN model file
        save_rknn_path = model_dir + '/' + model_path.split('/')[-1].split(
            '.')[0] + '_quan.rknn'
    else:
        flag = rknn.build(do_quantization=False)
        print('not do quantization')
        # Path for the exported RKNN model file
        save_rknn_path = model_dir + '/' + model_path.split('/')[-1].split(
            '.')[0] + '.rknn'

    if flag == 0:
        print('build success')
    else:
        print('build failure')
    print('done')

    flag = rknn.export_rknn(save_rknn_path)
    if flag == 0:
        print('export success')
    else:
        print('export failure')
    print('done')
    print(save_rknn_path)
    # Release RKNN Context
    rknn.release()
    print('save_rknn_path:', save_rknn_path)
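quanlization_darknet_model relies on a module-level model_dir variable that is not part of this snippet. A hypothetical call, assuming model_dir is defined in the same module and using placeholder file names:

# Hypothetical usage; model_dir and all paths are placeholders.
import os

model_dir = './models'
os.makedirs(model_dir, exist_ok=True)

quanlization_darknet_model(model_path='./models/yolov3-tiny.cfg',
                           weight_path='./models/yolov3-tiny.weights',
                           dataset_txt='./dataset.txt',
                           is_quantization=True,
                           pre_compile=False)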
Code Example #8
File: rknn_transfer.py  Project: Jeff-Zhao1999/person
def transfer(pb_path, rknn_name):
    # Create the RKNN execution object
    #rknn = RKNN(verbose=True, verbose_file='./mini_XCEPTION_build.log')
    rknn = RKNN()
    # Configure the model input, used by the NPU to pre-process input data.
    # With channel_mean_value='0 0 0 255', RGB data is transformed at inference time as
    # (R - 0)/255, (G - 0)/255, (B - 0)/255; the RKNN model applies the mean and
    # normalization automatically.
    # reorder_channel='0 1 2' controls whether the image channel order is changed;
    # '0 1 2' keeps the input channel order as-is.
    # reorder_channel='2 1 0' swaps channels 0 and 2: RGB input becomes BGR, and BGR becomes RGB.
    # Here the channel order is left unchanged.
    #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    rknn.config(quantized_dtype='dynamic_fixed_point-8')
 
    # Load the TensorFlow model:
    # tf_pb='digital_gesture.pb' specifies the TensorFlow model to convert
    # inputs specifies the input node(s) of the model
    # outputs specifies the output node(s) of the model
    # input_size_list specifies the size of the model input
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                         inputs=['input_1'],
                         outputs=['predictions/Softmax'],
                         input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load Model failed!')
        exit(ret)
    print('done')
 
    # Build (parse) the pb model
    # do_quantization=False means no quantization is performed
    # Quantization shrinks the model and speeds up inference, but costs some precision
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build Model failed!')
        exit(ret)
    print('done')
 
    # Export and save the RKNN model file
    print('--> Export RKNN model')
    ret = rknn.export_rknn(rknn_name)
    if ret != 0:
        print('Export Model failed!')
        exit(ret)
    print('done')
 
    # Release RKNN Context
    rknn.release()
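As a rough host-side illustration of what the config comments above describe (not what the NPU literally executes), the normalization for channel_mean_value='0 0 0 255' and a '2 1 0' channel reorder would amount to:

# Host-side sketch of the preprocessing described above; purely illustrative.
import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # placeholder RGB image

normalized = (img.astype(np.float32) - 0.0) / 255.0   # (R-0)/255, (G-0)/255, (B-0)/255
reordered = img[:, :, [2, 1, 0]]                       # reorder_channel='2 1 0': RGB <-> BGR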
Code Example #9
    def save_rknn(self,
                  rknnpath,
                  verbose=True,
                  verbose_file=None,
                  input_mean_value='0 0 0 1',
                  input_channels='0 1 2',
                  do_quantization=True,
                  pre_compile=True):
        TMP_PB_PATH = './tmp.pb'
        from rknn.api import RKNN
        self.save_pb(TMP_PB_PATH)

        rknn = RKNN(verbose=verbose, verbose_file=verbose_file)

        print('--> config model')
        rknn.config(channel_mean_value=input_mean_value,
                    reorder_channel=input_channels)
        print('done')

        print('--> Loading pb, input shape = ' + str([self.__input_shape]))
        ret = rknn.load_tensorflow(tf_pb=TMP_PB_PATH,
                                   inputs=[self.input.op.name],
                                   outputs=[self.output.op.name],
                                   input_size_list=[list(self.__input_shape)])
        if ret != 0:
            print('Load pb failed! Ret = {}'.format(ret))
            exit(ret)
        print('done')

        print('--> Building model')
        ret = rknn.build(do_quantization=do_quantization,
                         dataset='./rknn_quantization.txt',
                         pre_compile=pre_compile)
        if ret != 0:
            print('Build model failed!')
            exit(ret)
        print('done')

        print('--> Export RKNN model')
        ret = rknn.export_rknn(rknnpath)
        if ret != 0:
            print('Export rknn failed!')
            exit(ret)
        print('done')

        rknn.release()
Code Example #10
def rknn_convert(input_model, output_model, model_input_shape, output_tensor_num, dataset_file, target_platform):
    # Create RKNN object
    rknn = RKNN()
    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=1, target_platform=target_platform)

    # Load tensorflow model
    print('--> Loading model')
    if output_tensor_num == 1:
        output_tensor_names = ['predict_conv/BiasAdd']
    elif output_tensor_num == 2:
        output_tensor_names = ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd']
    elif output_tensor_num == 3:
        output_tensor_names = ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd', 'predict_conv_3/BiasAdd']
    else:
        raise ValueError('invalid output tensor number: {}'.format(output_tensor_num))


    ret = rknn.load_tensorflow(tf_pb=input_model,
                               inputs=['image_input'],
                               outputs=output_tensor_names,
                               input_size_list=[model_input_shape+(3,)],
                               predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if ret != 0:
        print('Load failed!')
        exit(ret)

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=dataset_file, pre_compile=True)
    if ret != 0:
        print('Build failed!')
        exit(ret)

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn(output_model)
    if ret != 0:
        print('Export .rknn failed!')
        exit(ret)

    # Release RKNN object
    rknn.release()
Code Example #11
def load_init_model(rknn_path, device_id):
    # Create an RKNN object
    rknn = RKNN(verbose=True, verbose_file='verbose.log')
    rknn.load_rknn(rknn_path)
    # init runtime environment
    print('--> Init runtime environment')
    # Initialize the runtime environment; on the host this is relatively slow
    if device_id == '':
        ret = rknn.init_runtime()
        print('device_id is empty, initializing runtime on the host')
    else:
        ret = rknn.init_runtime(target='rk1808',
                                device_id=device_id)  # compute stick in passive mode
    # If ret is non-zero, initialization failed
    if ret != 0:
        # Report the failure
        print('Init runtime environment failed')
        # Release the context and bail out
        rknn.release()
        return None
    return rknn
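A hypothetical end-to-end use of load_init_model, with placeholder paths and a placeholder input image:

# Hypothetical usage; model path, device id, and image are placeholders.
import cv2

rknn = load_init_model('./model.rknn', device_id='')
if rknn is not None:
    img = cv2.imread('./test.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    outputs = rknn.inference(inputs=[img])
    print('output shape:', outputs[0].shape)
    rknn.release()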
Code Example #12
def rknn_convert(input_model, output_model, model_input_shape, dataset_file,
                 target_platform):
    # Create RKNN object
    rknn = RKNN()
    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1,
                target_platform=target_platform)

    # Load tensorflow model
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=input_model,
                               inputs=['image_input'],
                               outputs=['dense/Softmax'],
                               input_size_list=[model_input_shape + (3, )],
                               predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if ret != 0:
        print('Load failed!')
        exit(ret)

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True,
                     dataset=dataset_file,
                     pre_compile=True)
    if ret != 0:
        print('Build failed!')
        exit(ret)

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn(output_model)
    if ret != 0:
        print('Export .rknn failed!')
        exit(ret)

    # Release RKNN object
    rknn.release()
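This converter takes the kind of arguments one would normally expose on a command line; a hypothetical argparse wrapper (the flag names are chosen here, not taken from the original project):

# Hypothetical CLI wrapper around rknn_convert(); the argument names are made up.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert a frozen TF graph to RKNN')
    parser.add_argument('--input_model', required=True)
    parser.add_argument('--output_model', required=True)
    parser.add_argument('--input_shape', type=int, nargs=2, default=[224, 224])
    parser.add_argument('--dataset_file', required=True)
    parser.add_argument('--target_platform', default='rk1808')
    args = parser.parse_args()

    rknn_convert(args.input_model, args.output_model,
                 tuple(args.input_shape), args.dataset_file,
                 args.target_platform)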
Code Example #13
File: rknn_server_class.py  Project: leokwu/rk_ctb
    def __deal(self, model, post_func):

        rknn = RKNN()
        ret = rknn.load_rknn(path=model)

        # init runtime environment
        logger.debug('--> Init runtime environment')
        ret = rknn.init_runtime()
        if ret != 0:
            logger.error('Init runtime environment failed')
            exit(ret)
        logger.debug('Init done')

        r_list = [self.rfd]
        w_list = [self.wfd]
        e_list = [self.rfd, self.wfd]

        while True:
            fd_r_list, fd_w_list, fd_e_list = select.select(
                r_list, w_list, e_list, select_timeout)
            if not (fd_r_list or fd_w_list or fd_e_list):
                continue
            for rs in fd_r_list:
                if rs is self.rfd:
                    decimg = self.__recieve_frame()
                    # logger.debug('__recieve_frame: %d' % (len(decimg)))
                    if decimg is None:
                        logger.error('decimg is None')
                        continue
                    outputs = rknn.inference(inputs=[decimg])
                    data = post_func(outputs)
                    for ws in fd_w_list:
                        if ws is self.wfd:
                            self.__send_result(data)
            for es in fd_e_list:
                logger.error("error fd list: %s" % (es))

        rknn.release()
        logger.debug('__deal finish')
Code Example #14
    if ret != 0:
        print('Export xception.rknn failed!')
        exit(ret)
    print('done')

    # ret = rknn.load_rknn('./xception.rknn')

    # Set inputs
    img = cv2.imread(IMG_PATH)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    #ret = rknn.init_runtime(target='rk1808')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    #outputs[0].tofile('out.txt', '\n')

    show_outputs(outputs)
    print('done')

    rknn.release()
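
show_outputs and IMG_PATH are defined elsewhere in the original script. A hypothetical stand-in for show_outputs that prints the top-5 scores, assuming outputs[0] is a single classification vector:

# Hypothetical stand-in for show_outputs(); assumes outputs[0] is one score vector.
import numpy as np

def show_outputs(outputs):
    scores = np.asarray(outputs[0]).flatten()
    top5 = scores.argsort()[-5:][::-1]           # indices of the five highest scores
    for rank, idx in enumerate(top5, start=1):
        print('top{}: class {} score {:.4f}'.format(rank, idx, scores[idx]))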

Code Example #15
def main(folder="test"):
    folder = folder
    files = os.listdir(folder)

    for i in range(len(files)):
        img = cv2.imread("{}/{}".format(folder, files[i]))
        img = (img - 127.5) / 127.5
        h, w = img.shape[:2]
        print("w, h = ", w, h)
        input = cv2.resize(img, (PRESET, PRESET),
                           interpolation=cv2.INTER_CUBIC)
        input = input.reshape(PRESET, PRESET, 3)
        input = np.array(input, dtype=np.float32)

        rknn = RKNN()

        #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')

        # Load TensorFlow Model
        print('--> Loading model')
        rknn.load_tensorflow(tf_pb='pretrained/SR_freeze.pb',
                             inputs=['ESRGAN_g/Conv2D'],
                             outputs=['output_image'],
                             input_size_list=[[PRESET, PRESET, 3]])
        print('done')

        # Build Model
        print('--> Building model')
        rknn.build(do_quantization=False)
        print('done')

        # Export RKNN Model
        rknn.export_rknn('./sr_rknn.rknn')

        # Direct Load RKNN Model
        rknn.load_rknn('./sr_rknn.rknn')

        # init runtime environment
        print('--> Init runtime environment')
        ret = rknn.init_runtime()
        if ret != 0:
            print('Init runtime environment failed')

        # Inference
        print('--> Running model')

        output_image = rknn.inference(inputs=[input])
        print('complete')
        out = np.array(output_image, dtype=np.float64)
        print("output_image = ", out.shape)
        out = np.squeeze(out)

        Y_ = out.reshape(PRESET * 4, PRESET * 4, 3)
        Y_ = cv2.resize(Y_, (w * 4, h * 4), interpolation=cv2.INTER_CUBIC)
        print("output shape is ", Y_.shape)

        # Post-processing

        Y_ = (Y_ + 1) * 127.5
        cv2.imwrite("{}/{}_yval.png".format(OUT_DIR, i), Y_)

        # Evaluate Perf on Simulator
        #rknn.eval_perf()

        # Release RKNN Context
        rknn.release()
Code Example #16
File: demo_camera_spilt.py  Project: chenshiqin/mtcnn
 def run(self):  # Put the code to execute in run(); the thread runs run() directly after it is created
     minsize = 20
     threshold = [0.8, 0.9, 0.95]
     factor = 0.709
     pnet_rknn_list = init_pnet()
     rnet_rknn = RKNN()
     onet_rknn = RKNN()
     rnet_rknn.load_rknn('./RNet.rknn')
     onet_rknn.load_rknn('./ONet.rknn')
     ret = rnet_rknn.init_runtime()
     if ret != 0:
         #print('Init rnet runtime environment failed')
         exit(ret)
     ret = onet_rknn.init_runtime()
     if ret != 0:
         #print('Init onet runtime environment failed')
         exit(ret)
     sys.stdout = open('/dev/stdout', 'w')
     sys.stderr = open('/dev/stderr', 'w')
     global proflag
     global IMAGE_list
     global boundingbox_list
     nonfacecount = 0
     #wrongimg = 1
     while (proflag == 1):
         imreadLock.acquire()
         img0 = IMAGE_list[0].copy()
         img = IMAGE_list[1].copy()
         imreadLock.release()
         #tic()
         score_cmp = compare_image(img0, img)
         #print("score_cmp",score_cmp)
         #toc()
         if score_cmp < 0.98:
             #imreadLock.release()
             #print("detect face start")
             #cv2.imwrite("aa.jpg",img)
             tic()
             boundingboxes, points = detect_face(img, minsize,
                                                 pnet_rknn_list, rnet_rknn,
                                                 onet_rknn, threshold,
                                                 False, factor)
             #print("boundingboxes shape",boundingboxes.shape)
             print("total cost")
             toc()
             if boundingboxes.shape[0] != 0:
                 if len(boundingbox_list) != 0:
                     boundingbox_list.clear()
                 boundingbox_list.append(boundingboxes)
             else:
                 #path = str(wrongimg)+".jpg"
                 #cv2.imwrite(path,img)
                 #wrongimg += 1
                 nonfacecount += 1
                 if nonfacecount >= 3:
                     boundingbox_list.clear()
                     nonfacecount = 0
     for i in range(2):
         pnet_rknn_list[i].release()
     rnet_rknn.release()
     onet_rknn.release()