Example #1
def quantify_transfer(pb_name, dataset_name, export_name):
    ret = 0
    print(pb_name, dataset_name, export_name)
    rknn = RKNN()
    rknn.config(channel_mean_value='',
                reorder_channel='',
                quantized_dtype='dynamic_fixed_point-8')
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_name,
                               inputs=['test/x'],
                               outputs=['test/hypothesis'],
                               input_size_list=[[1, 4]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    rknn.build(do_quantization=True, dataset=dataset_name)
    print('done')
    # Export and save the RKNN model file
    rknn.export_rknn(export_name)
    # Release RKNN Context
    rknn.release()
    return ret
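A minimal way this helper might be called, assuming the frozen graph, the quantization dataset list, and the output path already exist (the file names below are placeholders):

if __name__ == '__main__':
    # Placeholder paths: a frozen TensorFlow graph, a dataset list used for
    # quantization, and the output .rknn file.
    ret = quantify_transfer('model.pb', 'dataset.txt', 'model.rknn')
    print('conversion returned', ret)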
Example #2
def transfer(pb_path, rknn_name):
  # Create RKNN object
  rknn = RKNN()

  # init runtime environment
  print('--> Init runtime environment')
  ret = rknn.init_runtime()
  if ret != 0:
      print('Init runtime environment failed')
      exit(ret)
  print('done')

  # Config for Model Input PreProcess
  rknn.config()

  # Load TensorFlow Model
  print('--> Loading model')
  rknn.load_tensorflow(tf_pb=pb_path,
                       inputs=['Reshape'],
                       outputs=['probability'],
                       input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
  print('done')

  # Build Model
  print('--> Building model')
  rknn.build(do_quantization=False)
  print('done')

  # Export RKNN Model
  rknn.export_rknn(rknn_name)

  # Release RKNN Context
  rknn.release()
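A sketch of how this function might be invoked; INPUT_WIDTH and INPUT_HEIGHT are assumed to be module-level constants, and the values and paths below are placeholders:

# Assumed module-level constants used by transfer() above (placeholder values).
INPUT_WIDTH = 64
INPUT_HEIGHT = 64

if __name__ == '__main__':
    transfer('./frozen_model.pb', './model.rknn')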
Example #3
def common_transfer(pb_name,export_name):
        # used to reference a specific log
        ret = 0
        rknn = RKNN()
        # This step is not needed when working with grayscale images.
        # rknn.config(channel_mean_value='', reorder_channel='')
        print('--> Loading model')

        ret = rknn.load_tensorflow(
                tf_pb=pb_name,
                inputs=['x'],
                outputs=['y_conv'],
                input_size_list=[[28, 28, 1]])
        if ret != 0:
                print('load_tensorflow error')
                rknn.release()
                return ret
        print('done')
        print('--> Building model')
        rknn.build(do_quantization=False)
        print('done')
        # Export and save the RKNN model file
        rknn.export_rknn(export_name)
        # Release RKNN Context
        rknn.release()
        return ret
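A brief usage sketch with placeholder file names; with the parameters now wired through, the caller chooses both the frozen graph and the export path:

if __name__ == '__main__':
    ret = common_transfer('./mnist_frozen_graph.pb', './mnist.rknn')
    print('common_transfer returned', ret)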
Example #4
def to_rknn(pb_path, rknn_path):
    rknn = RKNN(verbose=True)
    rknn.config(channel_mean_value='0 0 0 1', reorder_channel='0 1 2')
    # rknn.config(channel_mean_value='128 128 128 128', reorder_channel='0 1 2')
    print('--> Loading model')
    rknn.load_tensorflow(tf_pb=pb_path,
                         inputs=['Placeholder'],
                         outputs=['ConvPred/ConvPred'],
                         input_size_list=[[INPUT_HEIGHT, INPUT_WIDTH, 3]])
    print('done')
    print('--> Building model')
    rknn.build(do_quantization=False, pre_compile=True)
    print('done')
    rknn.export_rknn(rknn_path)
Example #5
def convert_to_rknn():
    from rknn.api import RKNN
    # Create RKNN object
    rknn = RKNN(verbose=True)

    # pre-process config
    print('--> config model')
    rknn.config(channel_mean_value='127.5 127.5 127.5 128',
                reorder_channel='0 1 2')
    print('done')

    # Load onnx model
    print('--> Loading model')
    ret = rknn.load_onnx(model='lprnet.onnx')
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=False, pre_compile=True, dataset='./data/dataset.txt')
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./lprnet.rknn')
    if ret != 0:
        print('Export model failed!')
        exit(ret)
    print('done')
Example #6
def rknn_convert(input_model, output_model, dataset_file, target_platform):
    # Create RKNN object
    rknn = RKNN()
    print('--> config model')
    rknn.config(channel_mean_value='127.5 127.5 127.5 127.5', reorder_channel='0 1 2', batch_size=1, target_platform=target_platform)

    # Load onnx model
    print('--> Loading model')
    ret = rknn.load_onnx(model=input_model)
    if ret != 0:
        print('Load failed!')
        exit(ret)

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=dataset_file, pre_compile=True)
    if ret != 0:
        print('Build failed!')
        exit(ret)

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn(output_model)
    if ret != 0:
        print('Export .rknn failed!')
        exit(ret)

    # Release RKNN object
    rknn.release()
Example #7
def to_rknn(pb_path, rknn_path):
    rknn = RKNN(verbose=True)
    rknn.config(channel_mean_value='127.5 127.5 127.5 127.5',
                reorder_channel='2 1 0')
    rknn.load_tensorflow(
        tf_pb=pb_path,
        inputs=['input_tensor'],
        outputs=[
            #'lanenet_model/vgg_backend/binary_seg/ArgMax',
            'lanenet_model/vgg_frontend/vgg16_decode_module/binary_seg_decode/binary_final_logits/binary_final_logits',  # Workaround RKNN 1.1.0 bug
            #'lanenet_model/vgg_backend/binary_seg/Softmax',
            'lanenet_model/vgg_backend/instance_seg/pix_embedding_conv/pix_embedding_conv'
        ],
        input_size_list=[[256, 512, 3]])

    rknn.build(do_quantization=False, dataset='./dataset.txt')
    rknn.export_rknn(rknn_path)
Example #8
def quanlization_darknet_model(model_path,
                               weight_path,
                               dataset_txt,
                               is_quantization=True,
                               pre_compile=False):
    # Create the RKNN object
    rknn = RKNN(verbose=True, verbose_file='verbose.log')
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=4)
    flag = rknn.load_darknet(model=model_path, weight=weight_path)
    # No changes needed below this point
    if flag == 0:
        print('load_darknet success')
    else:
        print('load_darknet failure')
    print('done')
    # Parse and build the model
    # do_quantization=False means the model is not quantized
    # Quantization shrinks the model and speeds up inference, at some cost in accuracy
    print('--> Building model')
    print(os.getcwd())
    # flag = rknn.build(do_quantization=False)
    if is_quantization:
        flag = rknn.build(do_quantization=True,
                          dataset=dataset_txt,
                          pre_compile=pre_compile)
        print('do quantization ')
        # Path of the exported RKNN model file
        save_rknn_path = model_dir + '/' + model_path.split('/')[-1].split(
            '.')[0] + '_quan.rknn'
    else:
        flag = rknn.build(do_quantization=False)
        print('not do quantization')
        # Path of the exported RKNN model file
        save_rknn_path = model_dir + '/' + model_path.split('/')[-1].split(
            '.')[0] + '.rknn'

    if flag == 0:
        print('build success')
    else:
        print('build failure')
    print('done')

    flag = rknn.export_rknn(save_rknn_path)
    if flag == 0:
        print('export success')
    else:
        print('export failure')
    print('done')
    print(save_rknn_path)
    # Release RKNN Context
    rknn.release()
    print('save_rknn_path:', save_rknn_path)
Example #9
def keras_to_rknn(cfg_path, h5_path, darknet_path, rknn_path, dataset_path,
                  flag):
    # ------------------------------------------------------#
    #   h5 -> darknet: save as a .weights file
    # ------------------------------------------------------#
    keras_loader = KerasParser(cfg_path, h5_path, darknet_path)

    for block in keras_loader.block_gen:
        if 'convolutional' in block['type']:
            keras_loader.conv(block)
    keras_loader.close()

    # ------------------------------------------------------#
    #   darknet -> rknn: save as a .rknn file
    # ------------------------------------------------------#
    # Create RKNN object
    rknn = RKNN()

    # Load Darknet model
    print('--> Loading model')
    rknn.load_darknet(model=cfg_path, weight=darknet_path)
    print('done')
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1)

    # Build model
    print('--> Building model')
    if flag:
        rknn.build(do_quantization=True,
                   dataset=dataset_path,
                   pre_compile=True)
    else:
        rknn.build(do_quantization=False, pre_compile=True)
    print('done')

    # export model
    print('--> Export model')
    rknn.export_rknn(rknn_path)
    print('done')
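A sketch of a call with placeholder paths; KerasParser is assumed to be importable from the surrounding project, and flag=True enables quantization:

if __name__ == '__main__':
    keras_to_rknn(cfg_path='./yolov3.cfg',
                  h5_path='./yolov3.h5',
                  darknet_path='./yolov3_converted.weights',
                  rknn_path='./yolov3.rknn',
                  dataset_path='./dataset.txt',
                  flag=True)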
Example #10
def transfer(pb_path, rknn_name):
    # 创建RKNN执行对象
    #rknn = RKNN(verbose=True, verbose_file='./mini_XCEPTION_build.log')
    rknn = RKNN()
    # Configure the model input so the NPU can pre-process the input data.
    # With channel_mean_value='0 0 0 255', RGB data is transformed at inference
    # time as (R - 0)/255, (G - 0)/255, (B - 0)/255; the RKNN model applies the
    # mean subtraction and normalization automatically.
    # reorder_channel='0 1 2' keeps the input channel order unchanged.
    # reorder_channel='2 1 0' swaps channels 0 and 2: RGB input becomes BGR,
    # and BGR input becomes RGB.
    # Here the channel order is left unchanged.
    #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    rknn.config(quantized_dtype='dynamic_fixed_point-8')
 
    # Load the TensorFlow model
    # tf_pb='digital_gesture.pb' specifies the TensorFlow model to convert
    # inputs specifies the input nodes of the model
    # outputs specifies the output nodes of the model
    # input_size_list specifies the size of the model input
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                         inputs=['input_1'],
                         outputs=['predictions/Softmax'],
                         input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load Model failed!')
        exit(ret)
    print('done')
 
    # Parse and build the pb model
    # do_quantization=False means the model is not quantized
    # Quantization shrinks the model and speeds up inference, at some cost in accuracy
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build Model failed!')
        exit(ret)
    print('done')
 
    # Export and save the RKNN model file
    print('--> Export RKNN model')
    ret = rknn.export_rknn(rknn_name)
    if ret != 0:
        print('Export Model failed!')
        exit(ret)
    print('done')
 
    # Release RKNN Context
    rknn.release()
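The pre-processing that the comments above describe can be illustrated in plain NumPy; this is only a sketch of the (value - mean)/scale and channel-reorder behaviour, not part of the RKNN API, and npu_style_preprocess is a hypothetical helper:

import numpy as np

def npu_style_preprocess(image, mean=(0.0, 0.0, 0.0), scale=255.0, reorder=(0, 1, 2)):
    # reorder_channel: '0 1 2' keeps the channel order, '2 1 0' swaps channels 0 and 2.
    img = image.astype(np.float32)[..., list(reorder)]
    # channel_mean_value='0 0 0 255' corresponds to (R - 0)/255, (G - 0)/255, (B - 0)/255.
    return (img - np.array(mean, dtype=np.float32)) / scale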
Example #11
def caffe2rknn(caffe_proto,caffe_weight,rknn_model):
    print("start export")
    rknn = RKNN(verbose=True)
    ret = rknn.load_caffe(model=caffe_proto,
                          proto="caffe",
                          blobs=caffe_weight)

    rknn.config(channel_mean_value='127.5 127.5 127.5 128.0',
                reorder_channel='2 1 0',
                #reorder_channel='0 1 2',
                #need_horizontal_merge=True
                )
    ret = rknn.build(do_quantization=False)
    #ret = rknn.build(do_quantization=True)
    ret = rknn.export_rknn(export_path=rknn_model)
    print("export finished")
Example #12
    def save_rknn(self,
                  rknnpath,
                  verbose=True,
                  verbose_file=None,
                  input_mean_value='0 0 0 1',
                  input_channels='0 1 2',
                  do_quantization=True,
                  pre_compile=True):
        TMP_PB_PATH = './tmp.pb'
        from rknn.api import RKNN
        self.save_pb(TMP_PB_PATH)

        rknn = RKNN(verbose=verbose, verbose_file=verbose_file)

        print('--> config model')
        rknn.config(channel_mean_value=input_mean_value,
                    reorder_channel=input_channels)
        print('done')

        print('--> Loading pb, input shape = ' + str([self.__input_shape]))
        ret = rknn.load_tensorflow(tf_pb=TMP_PB_PATH,
                                   inputs=[self.input.op.name],
                                   outputs=[self.output.op.name],
                                   input_size_list=[list(self.__input_shape)])
        if ret != 0:
            print('Load pb failed! Ret = {}'.format(ret))
            exit(ret)
        print('done')

        print('--> Building model')
        ret = rknn.build(do_quantization=do_quantization,
                         dataset='./rknn_quantization.txt',
                         pre_compile=pre_compile)
        if ret != 0:
            print('Build model failed!')
            exit(ret)
        print('done')

        print('--> Export RKNN model')
        ret = rknn.export_rknn(rknnpath)
        if ret != 0:
            print('Export rknn failed!')
            exit(ret)
        print('done')

        rknn.release()
Example #13
def rknn_convert(input_model, output_model, model_input_shape, output_tensor_num, dataset_file, target_platform):
    # Create RKNN object
    rknn = RKNN()
    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=1, target_platform=target_platform)

    # Load tensorflow model
    print('--> Loading model')
    if output_tensor_num == 1:
        output_tensor_names = ['predict_conv/BiasAdd']
    elif output_tensor_num == 2:
        output_tensor_names = ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd']
    elif output_tensor_num == 3:
        output_tensor_names = ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd', 'predict_conv_3/BiasAdd']
    else:
        raise ValueError('invalid output tensor number ', output_tensor_num)


    ret = rknn.load_tensorflow(tf_pb=input_model,
                               inputs=['image_input'],
                               outputs=output_tensor_names,
                               input_size_list=[model_input_shape+(3,)],
                               predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if ret != 0:
        print('Load failed!')
        exit(ret)

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=dataset_file, pre_compile=True)
    if ret != 0:
        print('Build failed!')
        exit(ret)

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn(output_model)
    if ret != 0:
        print('Export .rknn failed!')
        exit(ret)

    # Release RKNN object
    rknn.release()
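A hedged example of calling this converter; the shape, dataset file, and platform string are placeholders (rk1808 is one of the platforms that appears elsewhere in these examples):

if __name__ == '__main__':
    rknn_convert(input_model='./yolo_frozen.pb',
                 output_model='./yolo.rknn',
                 model_input_shape=(416, 416),
                 output_tensor_num=3,
                 dataset_file='./dataset.txt',
                 target_platform=['rk1808'])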
Example #14
def rknn_convert(input_model, output_model, model_input_shape, dataset_file,
                 target_platform):
    # Create RKNN object
    rknn = RKNN()
    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1,
                target_platform=target_platform)

    # Load tensorflow model
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=input_model,
                               inputs=['image_input'],
                               outputs=['dense/Softmax'],
                               input_size_list=[model_input_shape + (3, )],
                               predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if ret != 0:
        print('Load failed!')
        exit(ret)

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True,
                     dataset=dataset_file,
                     pre_compile=True)
    if ret != 0:
        print('Build failed!')
        exit(ret)

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn(output_model)
    if ret != 0:
        print('Export .rknn failed!')
        exit(ret)

    # Release RKNN object
    rknn.release()
Example #15
    add_perm = False  # if set to True, the model input layout is changed to NHWC
    # Create RKNN object
    rknn = RKNN(verbose=True)

    # pre-process config
    print('--> config model')
    rknn.config(batch_size=1, mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], reorder_channel='0 1 2', target_platform=[platform], 
                force_builtin_perm=add_perm, output_optimize=1)
    print('done')

    # Load ONNX model
    print('--> Loading model')
    ret = rknn.load_onnx(model=ONNX_MODEL)
    if ret != 0:
        print('Load resnet50v2 failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    if ret != 0:
        print('Build resnet50 failed!')
        exit(ret)
    print('done')

    # rknn.export_rknn_precompile_model(RKNN_MODEL)
    rknn.export_rknn(RKNN_MODEL)

    rknn.release()
Example #16
	# Load TensorFlow Model
	print('--> Loading model')
	rknn.load_tensorflow(tf_pb='./freeze.pb',
                     inputs=['Placeholder'],
                     outputs=['fully_connected/Identity'],
                     input_size_list=[[28, 6]])
	print('done')

	# Build Model
	print('--> Building model')
	rknn.build(do_quantization=False)
	print('done')

	# Export RKNN Model
	rknn.export_rknn('./MTM_LSTM_RKNN.rknn')

	# Direct Load RKNN Model
	rknn.load_rknn('./MTM_LSTM_RKNN.rknn')

	stock_file_name = 'AAPL_1m.csv'
	encoding = 'euc-kr'
	names = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
	
	# Read and Delete Axis
	raw_dataframe = pd.read_csv(stock_file_name, names=names, encoding=encoding)
	today = raw_dataframe.values[-1, 0]
	del raw_dataframe['Date']
	storage = raw_dataframe.values[1:].astype(np.float32)

	"""
Example #17
if __name__ == '__main__':

    # Create RKNN object
    rknn = RKNN()

    # Load Darknet model
    print('--> Loading model')
    rknn.load_darknet(model='./yolov3.cfg', weight="./yolov3.weights")
    #rknn.load_darknet(model='./yolov3-tiny.cfg', weight="./yolov3-tiny.weights")

    print('done')

    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1)

    # Build model
    print('--> Building model')
    #rknn.build(do_quantization=True, dataset='./dataset.txt')
    rknn.build(do_quantization=True, dataset='./dataset_608x608.txt')
    print('done')

    #rknn.export_rknn('./yolov3_tiny.rknn')
    rknn.export_rknn('./yolov3.rknn')

    #rknn.load_rknn('./yolov3.rknn')
    #image = Image.open('./dog.jpg').resize((416, 416))
    #rknn.eval_perf(inputs=[image], is_print=True)

    exit(0)
Example #18
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True,
                     dataset='./dataset.txt',
                     rknn_batch_size=4)
    if ret != 0:
        print('Build mobilenet_v1 failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./mobilenet_v1.rknn')
    if ret != 0:
        print('Export mobilenet_v1.rknn failed!')
        exit(ret)
    print('done')

    # Set inputs
    img = cv2.imread('./dog_224x224.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    img = np.concatenate((img, img, img, img), axis=0)

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime(target='rk1808')
    if ret != 0:
Example #19
    # Set model config
    print('--> Config model')
    rknn.config(mean_values=[[127.5, 127.5, 127.5]],
                std_values=[[127.5, 127.5, 127.5]],
                reorder_channel='0 1 2',
                batch_size=16)
    print('done')

    # Hybrid quantization step2
    print('--> hybrid_quantization_step2')
    ret = rknn.hybrid_quantization_step2(
        model_input='./ssd_mobilenet_v2.json',
        data_input='./ssd_mobilenet_v2.data',
        model_quantization_cfg='./ssd_mobilenet_v2.quantization.cfg',
        dataset='./dataset.txt')
    if ret != 0:
        print('hybrid_quantization_step2 failed!')
        exit(ret)
    print('done')

    # Export RKNN model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./ssd_mobilenet_v2.rknn')
    if ret != 0:
        print('Export RKNN model failed!')
        exit(ret)
    print('done')

    rknn.release()
Example #20
        input_size_list=[[300, 300, 3]],
        predef_file=None)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Hybrid quantization step1
    print('--> hybrid_quantization_step1')
    ret = rknn.hybrid_quantization_step1(dataset='./dataset.txt')
    if ret != 0:
        print('hybrid_quantization_step1 failed!')
        exit(ret)
    print('done')

    rknn.export_rknn("ssd_mobilenet_v2.rknn")

    # Tips
    print('Please modify ssd_mobilenet_v2.quantization.cfg!')
    print(
        '=================================================================================================='
    )
    print('Modify method:')
    print(
        'Add {layer_name}: {quantized_dtype} to dict of customized_quantize_layers'
    )
    print(
        '=================================================================================================='
    )
    print('Notes:')
    print(
Example #21
    # pre-process config
    print('--> config model')
    rknn.config(channel_mean_value='103.94 116.78 123.68 58.82',
                reorder_channel='0 1 2')
    print('done')

    # Load tensorflow model
    print('--> Loading model')
    ret = rknn.load_darknet(model=yolov3_model_cfg, weight=yolov3_weights)
    if ret != 0:
        raise Exception('Load darknet yolov3 failed!')
    print('done')

    # Build model
    print('--> Building model')
    build_timer = timer()
    # ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    # do_quantization: whether to quantize the model; True or False.
    ret = rknn.build(do_quantization=False, pre_compile=pre_compile)
    if ret != 0:
        raise Exception('Build yolov3 failed!')
    print('done, time: %.2fs' % (timer() - build_timer))

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn(rknn_model)
    if ret != 0:
        raise Exception('Export rknn model: %s failed!' % rknn_model)

    print('done: %s, time: %.2fs' % (rknn_model, timer() - total_timer))
Example #22
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset_onet.txt')
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./ONet.rknn')
    if ret != 0:
        print('Export model failed!')
        exit(ret)
    print('done')

    img = cv2.imread('./onet_48_48_13.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.swapaxes(img, 0, 2)
    #img = cv2.resize(img, (48,48)) # default is bilinear

    tempimg = np.zeros((1, 3, 48, 48))

    # init runtime environment
    #print('--> Init runtime environment')
    ret = rknn.init_runtime()
Example #23
        # ret = rknn.load_onnx(model=MODEL)
        if ret != 0:
            print('Load model failed!')
            exit(ret)
        print('done')
        # Build model
        print('--> Building model')
        ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
        if ret != 0:
            print('Build onnx failed!')
            exit(ret)
        print('done')

        # Export rknn model
        print('--> Export RKNN model {}'.format(RKNN_MODEL_PATH))
        ret = rknn.export_rknn(RKNN_MODEL_PATH)
        if ret != 0:
            print('Export RKNN model failed!')
            exit(ret)
        print('done')
    else:
        # Direct load rknn model
        print('Loading RKNN model {}'.format(RKNN_MODEL_PATH))
        ret = rknn.load_rknn(RKNN_MODEL_PATH)
        if ret != 0:
            print('load rknn model failed.')
            exit(ret)
        print('done')

    if not NEED_RUN_MODEL:
        rknn.release()
Example #24
    if ret != 0:
        print('Load pytorch model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    if ret != 0:
        print('Build pytorch failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./resnet_18.rknn')
    if ret != 0:
        print('Export resnet_18.rknn failed!')
        exit(ret)
    print('done')

    ret = rknn.load_rknn('./resnet_18.rknn')

    # Set inputs
    img = cv2.imread('./space_shuttle_224.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
Example #25
    if ret != 0:
        print('Load interp_test failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    if ret != 0:
        print('Build interp_test failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./interp_test.rknn')
    if ret != 0:
        print('Export interp_test.rknn failed!')
        exit(ret)
    print('done')

    # Set inputs
    old_img = cv2.imread('./cat.jpg')
    img = cv2.cvtColor(old_img, cv2.COLOR_BGR2RGB)

    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
Example #26
def convert_model(model_path, out_path, pre_compile):
    if os.path.isfile(model_path):
        yaml_config_file = model_path
        model_path = os.path.dirname(yaml_config_file)
    else:
        yaml_config_file = os.path.join(model_path, 'model_config.yml')
    if not os.path.exists(yaml_config_file):
        print('model config %s does not exist!' % yaml_config_file)
        exit(-1)

    model_configs = parse_model_config(yaml_config_file)

    exported_rknn_model_path_list = []

    for model_name in model_configs['models']:
        model = model_configs['models'][model_name]

        rknn = RKNN()

        rknn.config(**model['configs'])

        print('--> Loading model...')
        if model['platform'] == 'tensorflow':
            model_file_path = os.path.join(model_path,
                                           model['model_file_path'])
            input_size_list = []
            for input_size_str in model['subgraphs']['input-size-list']:
                input_size = list(map(int, input_size_str.split(',')))
                input_size_list.append(input_size)
            pass
            rknn.load_tensorflow(tf_pb=model_file_path,
                                 inputs=model['subgraphs']['inputs'],
                                 outputs=model['subgraphs']['outputs'],
                                 input_size_list=input_size_list)
        elif model['platform'] == 'tflite':
            model_file_path = os.path.join(model_path,
                                           model['model_file_path'])
            rknn.load_tflite(model=model_file_path)
        elif model['platform'] == 'caffe':
            prototxt_file_path = os.path.join(model_path,
                                              model['prototxt_file_path'])
            caffemodel_file_path = os.path.join(model_path,
                                                model['caffemodel_file_path'])
            rknn.load_caffe(model=prototxt_file_path,
                            proto='caffe',
                            blobs=caffemodel_file_path)
        elif model['platform'] == 'onnx':
            model_file_path = os.path.join(model_path,
                                           model['model_file_path'])
            rknn.load_onnx(model=model_file_path)
        else:
            print("platform %s not support!" % (model['platform']))
        print('done')

        if model['quantize']:
            dataset_path = os.path.join(model_path, model['dataset'])
        else:
            dataset_path = './dataset'

        print('--> Build RKNN model...')
        rknn.build(do_quantization=model['quantize'],
                   dataset=dataset_path,
                   pre_compile=pre_compile)
        print('done')

        export_rknn_model_path = "%s.rknn" % (os.path.join(
            out_path, model_name))
        print('--> Export RKNN model to: {}'.format(export_rknn_model_path))
        rknn.export_rknn(export_path=export_rknn_model_path)
        exported_rknn_model_path_list.append(export_rknn_model_path)
        print('done')

    return exported_rknn_model_path_list
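A possible call, assuming a model directory containing a model_config.yml as parsed above (paths are placeholders):

if __name__ == '__main__':
    exported = convert_model(model_path='./models/my_model',
                             out_path='./out',
                             pre_compile=False)
    print('exported:', exported)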
Example #27
    if ret != 0:
        print('Load model failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export RKNN model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./deploy_rm_detection_output.rknn')
    if ret != 0:
        print('Export rknn failed!')
        exit(ret)
    print('done')

    # Set inputs
    img = cv2.imread('./road_300x300.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
Example #28
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

from rknn.api import RKNN

# Create RKNN object
rknn = RKNN()

# Load TensorFlow Model
print('--> Loading model')
rknn.load_tensorflow(tf_pb='./freeze.pb',
                     inputs=['transpose'],
                     outputs=['fully_connected/Identity'],
                     input_size_list=[[28, 6]])
print('done')

# Build Model
print('--> Building model')
rknn.build(do_quantization=False)
print('done')

# Export RKNN Model
rknn.export_rknn('./RNN_RKNN.rknn')

# Direct Load RKNN Model
# rknn.load_rknn('./ssd_mobilenet_v1_coco.rknn')

rknn.release()
Example #29
    if ret != 0:
        print('Load keras model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    if ret != 0:
        print('Build keras model failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./xception.rknn')
    if ret != 0:
        print('Export xception.rknn failed!')
        exit(ret)
    print('done')

    # ret = rknn.load_rknn('./xception.rknn')

    # Set inputs
    img = cv2.imread(IMG_PATH)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    #ret = rknn.init_runtime(target='rk1808')
Example #30
import math
import random

from rknn.api import RKNN

if __name__ == '__main__':

    # Create RKNN object
    rknn = RKNN()

    # Load Darknet model
    print('--> Loading model')
    rknn.load_darknet(model='./yolov3_608x608.cfg', weight="./yolov3.weights")

    print('done')

    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1)

    # Build model
    print('--> Building model')
    rknn.build(do_quantization=True,
               dataset='./dataset_608x608.txt',
               pre_compile=True)
    print('done')

    rknn.export_rknn('./yolov3_608x608.rknn')

    exit(0)