コード例 #1
0
def transfer(pb_path, rknn_name):
    """Convert a frozen TensorFlow .pb model to an RKNN model file.

    Args:
        pb_path: Path to the frozen TensorFlow graph (.pb).
        rknn_name: Output path for the exported .rknn model.
    """
    # Create RKNN object
    rknn = RKNN()

    # Config for Model Input PreProcess.
    # BUG FIX: the original called init_runtime() here, before any model
    # was loaded or built. Per the RKNN API, init_runtime() is only valid
    # after build()/load_rknn(), and it is not needed for conversion at
    # all, so the premature call was removed.
    rknn.config()

    # Load TensorFlow Model
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['Reshape'],
                               outputs=['probability'],
                               input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load model failed')
        exit(ret)
    print('done')

    # Build Model
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build model failed')
        exit(ret)
    print('done')

    # Export RKNN Model
    ret = rknn.export_rknn(rknn_name)
    if ret != 0:
        print('Export model failed')
        exit(ret)

    # Release RKNN Context
    rknn.release()
コード例 #2
0
def to_rknn(pb_path, rknn_path):
    """Convert a frozen TensorFlow .pb graph to an RKNN model file.

    Args:
        pb_path: Path to the frozen TensorFlow graph.
        rknn_path: Output path for the exported .rknn file.
    """
    rknn = RKNN(verbose=True)
    # Pass pixels through unscaled and keep the channel order unchanged.
    rknn.config(channel_mean_value='0 0 0 1', reorder_channel='0 1 2')
    # rknn.config(channel_mean_value='128 128 128 128', reorder_channel='0 1 2')
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['Placeholder'],
                               outputs=['ConvPred/ConvPred'],
                               input_size_list=[[INPUT_HEIGHT, INPUT_WIDTH, 3]])
    if ret != 0:
        print('Load failed!')
        exit(ret)
    print('done')
    print('--> Building model')
    ret = rknn.build(do_quantization=False, pre_compile=True)
    if ret != 0:
        print('Build failed!')
        exit(ret)
    print('done')
    ret = rknn.export_rknn(rknn_path)
    if ret != 0:
        print('Export failed!')
        exit(ret)
    # BUG FIX: release the RKNN context; the original leaked it.
    rknn.release()
コード例 #3
0
def quantify_transfer(pb_name, dataset_name, export_name):
    """Convert a TensorFlow .pb model to a quantized RKNN model.

    Args:
        pb_name: Path of the frozen TensorFlow graph.
        dataset_name: Calibration dataset file for quantization.
        export_name: Output path of the exported .rknn file.

    Returns:
        0 on success, the failing RKNN API return code otherwise.
    """
    ret = 0
    print(pb_name, dataset_name, export_name)
    rknn = RKNN()
    # Quantize to 8-bit dynamic fixed point.
    rknn.config(channel_mean_value='',
                reorder_channel='',
                quantized_dtype='dynamic_fixed_point-8')
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_name,
                               inputs=['test/x'],
                               outputs=['test/hypothesis'],
                               input_size_list=[[1, 4]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=dataset_name)
    if ret != 0:
        # BUG FIX: previously unchecked — a failed build was exported anyway.
        print('build error')
        rknn.release()
        return ret
    print('done')
    # Export and save the rknn model file
    ret = rknn.export_rknn(export_name)
    if ret != 0:
        print('export_rknn error')
    # Release RKNN Context
    rknn.release()
    return ret
コード例 #4
0
def common_transfer(pb_name, export_name):
    """Convert a frozen TensorFlow graph to an (unquantized) RKNN model.

    Args:
        pb_name: Path of the frozen TensorFlow graph to load.
        export_name: Output path of the exported .rknn file.

    Returns:
        0 on success, the failing RKNN API return code otherwise.
    """
    ret = 0
    rknn = RKNN()
    # This step is not needed when handling grayscale images.
    # rknn.config(channel_mean_value='', reorder_channel='')
    print('--> Loading model')

    # BUG FIX: the pb_name/export_name parameters were ignored and the
    # paths './mnist_frozen_graph.pb' / './mnist.rknn' were hard-coded.
    ret = rknn.load_tensorflow(
            tf_pb=pb_name,
            inputs=['x'],
            outputs=['y_conv'],
            input_size_list=[[28, 28, 1]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('build error')
        rknn.release()
        return ret
    print('done')
    # Export and save the rknn model file
    ret = rknn.export_rknn(export_name)
    # Release RKNN Context
    rknn.release()
    return ret
コード例 #5
0
def to_rknn(pb_path, rknn_path):
    """Convert the frozen LaneNet TensorFlow graph to an RKNN model.

    Args:
        pb_path: Path to the frozen LaneNet .pb graph.
        rknn_path: Output path for the exported .rknn file.
    """
    rknn = RKNN(verbose=True)
    # Normalize input to [-1, 1]; '2 1 0' swaps the R and B channels.
    rknn.config(channel_mean_value='127.5 127.5 127.5 127.5',
                reorder_channel='2 1 0')
    ret = rknn.load_tensorflow(
        tf_pb=pb_path,
        inputs=['input_tensor'],
        outputs=[
            #'lanenet_model/vgg_backend/binary_seg/ArgMax',
            'lanenet_model/vgg_frontend/vgg16_decode_module/binary_seg_decode/binary_final_logits/binary_final_logits',  # Workaround RKNN 1.1.0 bug
            #'lanenet_model/vgg_backend/binary_seg/Softmax',
            'lanenet_model/vgg_backend/instance_seg/pix_embedding_conv/pix_embedding_conv'
        ],
        input_size_list=[[256, 512, 3]])
    if ret != 0:
        exit(ret)

    ret = rknn.build(do_quantization=False, dataset='./dataset.txt')
    if ret != 0:
        exit(ret)
    ret = rknn.export_rknn(rknn_path)
    if ret != 0:
        exit(ret)
    # BUG FIX: release the RKNN context; the original leaked it.
    rknn.release()
コード例 #6
0
ファイル: rknn_transfer.py プロジェクト: Jeff-Zhao1999/person
def transfer(pb_path, rknn_name):
    """Convert a frozen TensorFlow .pb model to an RKNN model file."""
    # Create the RKNN execution object
    #rknn = RKNN(verbose=True, verbose_file='./mini_XCEPTION_build.log')
    rknn = RKNN()
# Configure the model input for the NPU input preprocessing.
# With channel_mean_value='0 0 0 255' the model transforms RGB input at
# inference time as (R - 0)/255, (G - 0)/255, (B - 0)/255; the RKNN model
# applies the mean subtraction and normalization automatically.
# reorder_channel='0 1 2' keeps the image channel order as given;
# reorder_channel='2 1 0' swaps channels 0 and 2, i.e. RGB input becomes
# BGR (and BGR input becomes RGB).
# Here the channel order is left unchanged.
    #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    rknn.config(quantized_dtype='dynamic_fixed_point-8')
 
# Load the TensorFlow model:
# tf_pb specifies the TensorFlow model to be converted
# inputs specifies the input node(s) of the graph
# outputs specifies the output node(s) of the graph
# input_size_list specifies the size of the model input
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                         inputs=['input_1'],
                         outputs=['predictions/Softmax'],
                         input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load Model failed!')
        exit(ret)
    print('done')
 
# Build the model parsed from the pb file.
# do_quantization=False means no quantization is applied; quantization
# shrinks the model and speeds up inference at some cost in accuracy.
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build Model failed!')
        exit(ret)
    print('done')
 
    # Export and save the rknn model file
    print('--> Export RKNN model')
    ret = rknn.export_rknn(rknn_name)
    if ret != 0:
        print('Export Model failed!')
        exit(ret)
    print('done')
 
    # Release RKNN Context
    rknn.release()
コード例 #7
0
    def save_rknn(self,
                  rknnpath,
                  verbose=True,
                  verbose_file=None,
                  input_mean_value='0 0 0 1',
                  input_channels='0 1 2',
                  do_quantization=True,
                  pre_compile=True):
        """Serialize this model to a temporary .pb and convert it to RKNN.

        The frozen graph is written via save_pb(), loaded into the RKNN
        toolkit, built (optionally quantized against
        ./rknn_quantization.txt and pre-compiled) and exported to
        *rknnpath*. Exits the process on any toolkit failure.
        """
        tmp_pb_path = './tmp.pb'
        from rknn.api import RKNN
        self.save_pb(tmp_pb_path)

        converter = RKNN(verbose=verbose, verbose_file=verbose_file)

        print('--> config model')
        converter.config(channel_mean_value=input_mean_value,
                         reorder_channel=input_channels)
        print('done')

        print('--> Loading pb, input shape = ' + str([self.__input_shape]))
        status = converter.load_tensorflow(
            tf_pb=tmp_pb_path,
            inputs=[self.input.op.name],
            outputs=[self.output.op.name],
            input_size_list=[list(self.__input_shape)])
        if status != 0:
            print('Load pb failed! Ret = {}'.format(status))
            exit(status)
        print('done')

        print('--> Building model')
        status = converter.build(do_quantization=do_quantization,
                                 dataset='./rknn_quantization.txt',
                                 pre_compile=pre_compile)
        if status != 0:
            print('Build model failed!')
            exit(status)
        print('done')

        print('--> Export RKNN model')
        status = converter.export_rknn(rknnpath)
        if status != 0:
            print('Export rknn failed!')
            exit(status)
        print('done')

        converter.release()
コード例 #8
0
def rknn_convert(input_model, output_model, model_input_shape, output_tensor_num, dataset_file, target_platform):
    """Convert a frozen TF detection graph (1-3 heads) to a quantized RKNN model.

    Raises ValueError if output_tensor_num is not 1, 2 or 3; exits the
    process on any RKNN toolkit failure.
    """
    # Create RKNN object
    rknn = RKNN()
    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=1, target_platform=target_platform)

    # Map the number of prediction heads to their output tensor names.
    heads = {
        1: ['predict_conv/BiasAdd'],
        2: ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd'],
        3: ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd', 'predict_conv_3/BiasAdd'],
    }

    # Load tensorflow model
    print('--> Loading model')
    if output_tensor_num not in heads:
        raise ValueError('invalid output tensor number ', output_tensor_num)
    output_tensor_names = heads[output_tensor_num]

    status = rknn.load_tensorflow(tf_pb=input_model,
                                  inputs=['image_input'],
                                  outputs=output_tensor_names,
                                  input_size_list=[model_input_shape+(3,)],
                                  predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if status != 0:
        print('Load failed!')
        exit(status)

    # Build (quantize + pre-compile) the model
    print('--> Building model')
    status = rknn.build(do_quantization=True, dataset=dataset_file, pre_compile=True)
    if status != 0:
        print('Build  failed!')
        exit(status)

    # Export rknn model
    print('--> Export RKNN model')
    status = rknn.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    # Release RKNN object
    rknn.release()
コード例 #9
0
def rknn_convert(input_model, output_model, model_input_shape, dataset_file,
                 target_platform):
    """Convert a frozen TF classifier to a quantized, pre-compiled RKNN model.

    Exits the process on any RKNN toolkit failure.
    """
    rknn = RKNN()  # conversion context
    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1,
                target_platform=target_platform)

    # Load the TensorFlow graph; the input shape gains a trailing channel dim.
    print('--> Loading model')
    status = rknn.load_tensorflow(tf_pb=input_model,
                                  inputs=['image_input'],
                                  outputs=['dense/Softmax'],
                                  input_size_list=[model_input_shape + (3, )],
                                  predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if status:
        print('Load failed!')
        exit(status)

    # Quantize and pre-compile during the build step.
    print('--> Building model')
    status = rknn.build(do_quantization=True,
                        dataset=dataset_file,
                        pre_compile=True)
    if status:
        print('Build  failed!')
        exit(status)

    # Write the converted model to disk.
    print('--> Export RKNN model')
    status = rknn.export_rknn(output_model)
    if status:
        print('Export .rknn failed!')
        exit(status)

    # Free the RKNN context.
    rknn.release()
コード例 #10
0
from rknn.api import RKNN
#create RKNN object
rknn = RKNN(verbose=True)
print('--> Loading model')
# Supply the frozen .pb file plus the input node, output node and input size list.
# Use TensorBoard or Netron to look up the graph node names.
ret = rknn.load_tensorflow(tf_pb='/home/yoona/Autopilot-TensorFlow/freeze.pb',
                                             inputs=['Placeholder'],
                                             outputs=['add_9'],
                                             input_size_list=[[66,200,3]])

# Print an error message if loading failed
if ret !=0:
        print('Load failed!')
        exit(ret)
# Print 'done' when loading completes
print('done')



print('--> Building model') # progress message for the build step
ret = rknn.build(do_quantization=False)
# Print an error message if the build failed
if ret !=0:
        print('Build failed!')
        exit(ret)
# Print 'done' when the build completes
print('done')


コード例 #11
0
if __name__ == '__main__':

    # Create RKNN object
    rknn = RKNN()

    # Config for Model Input PreProcess:
    # (x - 127.5) / 127.5 normalizes each channel to [-1, 1];
    # reorder_channel='0 1 2' keeps the RGB channel order unchanged.
    rknn.config(mean_values=[[127.5, 127.5, 127.5]],
                std_values=[[127.5, 127.5, 127.5]],
                reorder_channel='0 1 2')

    # Load TensorFlow Model
    # NOTE(review): INPUT_SIZE is assumed to be defined earlier in the
    # file — confirm.
    print('--> Loading model')
    rknn.load_tensorflow(
        tf_pb='./ssd_mobilenet_v1_coco_2017_11_17.pb',
        inputs=[
            'FeatureExtractor/MobilenetV1/MobilenetV1/Conv2d_0/BatchNorm/batchnorm/mul_1'
        ],
        outputs=['concat', 'concat_1'],
        input_size_list=[[INPUT_SIZE, INPUT_SIZE, 3]])
    print('done')

    # Build Model (quantized, calibrated with ./dataset.txt)
    print('--> Building model')
    rknn.build(do_quantization=True, dataset='./dataset.txt')
    print('done')

    # Export RKNN Model
    rknn.export_rknn('./ssd_mobilenet_v1_coco.rknn')

    # Direct Load RKNN Model
    # rknn.load_rknn('./ssd_mobilenet_v1_coco.rknn')
コード例 #12
0
from rknn.api import RKNN

#create RKNN object
rknn = RKNN(verbose=True)

#Load model
print('--> Loading model')
ret = rknn.load_tensorflow(tf_pb='./convert/freeze.pb',
                           inputs=['Placeholder'],
                           outputs=['Softmax'],
                           input_size_list=[[2304]])

# Print an error message if loading failed
if ret != 0:
    print('Load failed!')
    exit(ret)
# Print 'done' when loading completes
print('done')

#Build model
print('--> Building model')  # progress message for the build step
ret = rknn.build(do_quantization=False)
# Print an error message if the build failed
if ret != 0:
    print('Build failed!')
    exit(ret)
# Print 'done' when the build completes
print('done')

#Export rknn model
print('--> Export RKNN model')
コード例 #13
0
ファイル: convert.py プロジェクト: Seojiyoung/SMP_forecasting
from rknn.api import RKNN

rknn = RKNN(verbose=True)

# Load the frozen TensorFlow LSTM graph.
# FIX: the original mixed tabs and spaces for indentation; normalized
# to 4 spaces throughout (PEP 8, avoids TabError-prone blocks).
ret = rknn.load_tensorflow(tf_pb='./lstm_tanh.pb',
                           inputs=['Placeholder'],
                           outputs=['fully_connected/Identity'],
                           input_size_list=[[28, 8]])
if ret != 0:
    print('Load failed!')
    exit(ret)
print('Load success!')

# Build the RKNN model without quantization.
ret = rknn.build(do_quantization=False)
if ret != 0:
    print('Build failed!')
    exit(ret)
print('Build success!')

# Export the converted model.
ret = rknn.export_rknn('./lstm.rknn')
if ret != 0:
    print('Export failed!')
    exit(ret)
print('Saved model')

コード例 #14
0
ファイル: test.py プロジェクト: rockchip-linux/rknn-toolkit
            shutil.copyfile(pb_file, './inception_v3_quant_frozen.pb')
            shutil.rmtree(target_dir)
            os.remove(download_file)
        print('done')
    # Set model config
    print('--> Config model')
    rknn.config(mean_values=[[127.5, 127.5, 127.5]],
                std_values=[[128, 128, 128]],
                reorder_channel='0 1 2')
    print('done')

    # Load TensorFlow model
    print('--> Loading TensorFlow model')
    ret = rknn.load_tensorflow(tf_pb=PB_FILE,
                               inputs=INPUTS,
                               outputs=OUTPUTS,
                               input_size_list=[[INPUT_SIZE, INPUT_SIZE, 3]],
                               predef_file=None)
    if ret != 0:
        print('Load inception_v3_quant_frozen failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build inception_v3_quant_frozen.rknn failed!')
        exit(ret)
    print('done')
コード例 #15
0
def main(folder="test"):
    """Super-resolve every image in *folder* with an RKNN ESRGAN model.

    Each image is normalized to [-1, 1], resized to PRESET x PRESET,
    pushed through the converted network, and the 4x-upscaled result is
    written to OUT_DIR as '<index>_yval.png'.
    """
    # FIX: removed the no-op 'folder = folder' and the range(len(...))
    # loop; renamed 'input' to avoid shadowing the builtin input().
    files = os.listdir(folder)

    for i, name in enumerate(files):
        img = cv2.imread("{}/{}".format(folder, name))
        # Normalize pixel values into [-1, 1].
        img = (img - 127.5) / 127.5
        h, w = img.shape[:2]
        print("w, h = ", w, h)
        net_input = cv2.resize(img, (PRESET, PRESET),
                               interpolation=cv2.INTER_CUBIC)
        net_input = net_input.reshape(PRESET, PRESET, 3)
        net_input = np.array(net_input, dtype=np.float32)

        # NOTE(review): the model is converted and exported once per
        # image; hoisting this out of the loop would be faster, but the
        # original per-image behavior is preserved here.
        rknn = RKNN()
        print('--> Loading model')

        #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')

        # Load TensorFlow Model
        print('--> Loading model')
        rknn.load_tensorflow(tf_pb='pretrained/SR_freeze.pb',
                             inputs=['ESRGAN_g/Conv2D'],
                             outputs=['output_image'],
                             input_size_list=[[PRESET, PRESET, 3]])
        print('done')

        # Build Model
        print('--> Building model')
        rknn.build(do_quantization=False)
        print('done')

        # Export RKNN Model
        rknn.export_rknn('./sr_rknn.rknn')

        # Direct Load RKNN Model
        rknn.load_rknn('./sr_rknn.rknn')

        # init runtime environment
        print('--> Init runtime environment')
        ret = rknn.init_runtime()
        if ret != 0:
            print('Init runtime environment failed')

        # Inference
        print('--> Running model')

        output_image = rknn.inference(inputs=[net_input])
        print('complete')
        out = np.array(output_image, dtype=np.float64)
        print("output_image = ", out.shape)
        out = np.squeeze(out)

        Y_ = out.reshape(PRESET * 4, PRESET * 4, 3)
        Y_ = cv2.resize(Y_, (w * 4, h * 4), interpolation=cv2.INTER_CUBIC)
        print("output shape is ", Y_.shape)

        # Post-processing: map [-1, 1] back to [0, 255].
        Y_ = (Y_ + 1) * 127.5
        cv2.imwrite("{}/{}_yval.png".format(OUT_DIR, i), Y_)

        # Evaluate Perf on Simulator
        #rknn.eval_perf()

        # Release RKNN Context
        rknn.release()
コード例 #16
0
predict_days = 30  # NOTE(review): presumably the forecast horizon — confirm

# Convert the frozen MTM-LSTM graph to RKNN, then load it back for use.
if __name__ == '__main__':

	rknn = RKNN()
	print('--> Loading model')

	#rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')


	# Load TensorFlow Model
	print('--> Loading model')
	rknn.load_tensorflow(tf_pb='./freeze.pb',
                     inputs=['Placeholder'],
                     outputs=['fully_connected/Identity'],
                     input_size_list=[[28, 6]])
	print('done')

	# Build Model
	print('--> Building model')
	rknn.build(do_quantization=False)
	print('done')

	# Export RKNN Model
	rknn.export_rknn('./MTM_LSTM_RKNN.rknn')

	# Direct Load RKNN Model
	rknn.load_rknn('./MTM_LSTM_RKNN.rknn')

	stock_file_name = 'AAPL_1m.csv'
import cv2
import numpy as np
from rknn.api import RKNN

if __name__ == '__main__':

    rknn = RKNN(verbose=False)

    # Register the custom TruncateDiv and Exp op plugins before loading
    # the graph that uses them.
    rknn.register_op('./truncatediv/TruncateDiv.rknnop')
    rknn.register_op('./exp/Exp.rknnop')

    rknn.load_tensorflow(tf_pb='./custom_op_math.pb',
                         inputs=['input'],
                         outputs=['exp_0'],
                         input_size_list=[[1, 512]])
    rknn.build(do_quantization=False)
    # rknn.export_rknn('./rknn_test.rknn')

    # rknn.load_rknn('./rknn_test.rknn')

    # Initialize the runtime for inference.
    rknn.init_runtime()

    print("init runtime done")

    # Constant 1x512 float32 input filled with 50.0.
    in_data = np.full((1, 512), 50.0)
    in_data = in_data.astype(dtype='float32')

    output = rknn.inference(inputs=[in_data])

    print(output)
コード例 #18
0
    print('--> Init runtime environment')
    ret = rknn.init_runtime(host='rk3399pro')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Config for Model Input PreProcess
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='2 1 0')

    # Load TensorFlow Model
    print('--> Loading model')
    rknn.load_tensorflow(
        tf_pb='../digital_gesture_recognition/model_2500/digital_gesture.pb',
        inputs=['input_x'],
        outputs=['probability'],
        input_size_list=[[INPUT_SIZE, INPUT_SIZE, 3]])
    print('done')

    # Build Model
    print('--> Building model')
    rknn.build(do_quantization=False, dataset='./dataset.txt')
    print('done')

    # Export RKNN Model
    rknn.export_rknn('./digital_gesture.rknn')

    # Release RKNN Context
    rknn.release()
コード例 #19
0
ファイル: test.py プロジェクト: jack16888/rknn-toolkit
        if os.path.exists(pb_file):
            shutil.copyfile(pb_file, './inception_v3_quant_frozen.pb')
            shutil.rmtree(target_dir)
            os.remove(download_file)
        print('done')
    # Set model config
    print('--> Config model')
    rknn.config(reorder_channel='0 1 2')
    print('done')

    # Load TensorFlow model
    print('--> Loading TensorFlow model')
    ret = rknn.load_tensorflow(tf_pb=PB_FILE,
                               inputs=INPUTS,
                               outputs=OUTPUTS,
                               input_size_list=[[INPUT_SIZE, INPUT_SIZE, 3]],
                               predef_file=None,
                               mean_values='127.5',
                               std_values='128')
    if ret != 0:
        print('Load inception_v3_quant_frozen failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build inception_v3_quant_frozen.rknn failed!')
        exit(ret)
    print('done')
コード例 #20
0
def convert_model(model_path, out_path, pre_compile):
    """Convert every model listed in a model_config.yml into RKNN format.

    Args:
        model_path: Directory containing model_config.yml, or a direct
            path to the yaml config file itself.
        out_path: Directory where the exported .rknn files are written.
        pre_compile: Whether to pre-compile the RKNN models.

    Returns:
        List of paths of the exported .rknn model files.
    """
    if os.path.isfile(model_path):
        yaml_config_file = model_path
        model_path = os.path.dirname(yaml_config_file)
    else:
        yaml_config_file = os.path.join(model_path, 'model_config.yml')
    if not os.path.exists(yaml_config_file):
        # BUG FIX: the original used '%' with no conversion type
        # ('model config % not exist!'), which raised ValueError at
        # runtime instead of printing the intended message.
        print('model config %s not exist!' % yaml_config_file)
        exit(-1)

    model_configs = parse_model_config(yaml_config_file)

    exported_rknn_model_path_list = []

    for model_name in model_configs['models']:
        model = model_configs['models'][model_name]

        rknn = RKNN()

        # Forward the per-model preprocessing options straight to config().
        rknn.config(**model['configs'])

        print('--> Loading model...')
        if model['platform'] == 'tensorflow':
            model_file_path = os.path.join(model_path,
                                           model['model_file_path'])
            # input-size-list entries are comma-separated strings,
            # e.g. "224,224,3".
            input_size_list = []
            for input_size_str in model['subgraphs']['input-size-list']:
                input_size = list(map(int, input_size_str.split(',')))
                input_size_list.append(input_size)
            rknn.load_tensorflow(tf_pb=model_file_path,
                                 inputs=model['subgraphs']['inputs'],
                                 outputs=model['subgraphs']['outputs'],
                                 input_size_list=input_size_list)
        elif model['platform'] == 'tflite':
            model_file_path = os.path.join(model_path,
                                           model['model_file_path'])
            rknn.load_tflite(model=model_file_path)
        elif model['platform'] == 'caffe':
            prototxt_file_path = os.path.join(model_path,
                                              model['prototxt_file_path'])
            caffemodel_file_path = os.path.join(model_path,
                                                model['caffemodel_file_path'])
            rknn.load_caffe(model=prototxt_file_path,
                            proto='caffe',
                            blobs=caffemodel_file_path)
        elif model['platform'] == 'onnx':
            model_file_path = os.path.join(model_path,
                                           model['model_file_path'])
            rknn.load_onnx(model=model_file_path)
        else:
            print("platform %s not support!" % (model['platform']))
        print('done')

        # Quantized models need their calibration dataset; others get a
        # placeholder path.
        if model['quantize']:
            dataset_path = os.path.join(model_path, model['dataset'])
        else:
            dataset_path = './dataset'

        print('--> Build RKNN model...')
        rknn.build(do_quantization=model['quantize'],
                   dataset=dataset_path,
                   pre_compile=pre_compile)
        print('done')

        export_rknn_model_path = "%s.rknn" % (os.path.join(
            out_path, model_name))
        print('--> Export RKNN model to: {}'.format(export_rknn_model_path))
        rknn.export_rknn(export_path=export_rknn_model_path)
        exported_rknn_model_path_list.append(export_rknn_model_path)
        print('done')

    return exported_rknn_model_path_list
コード例 #21
0
from rknn.api import RKNN

#create RKNN object
rknn = RKNN(verbose=True)

#model config
rknn.config(channel_mean_value='103.94 116.78 123.68 58.82',
            reorder_channel='0 1 2',
            need_horizontal_merge=True)
# This config is only used when the input data has 3 channels;
# it is ignored for single-channel input.

#Load model
print('--> Loading model')  # progress message for loading
ret = rknn.load_tensorflow(tf_pb='./freeze.pb',
                           inputs=['Placeholder'],
                           outputs=['Softmax'],
                           input_size_list=[[2500]])  # w*h(w, h, 1)
# Print an error message if loading failed
if ret != 0:
    print('Load failed!')
    exit(ret)

# Print 'Load done' when loading completes
print('Load done')

#Build model
print('--> Buliding model')  # about to build (typo in the message kept as-is: runtime string)

ret = rknn.build(do_quantization=False)

# Print an error message if the build fails
コード例 #22
0
    # channel_mean_value "128 128 128 128" while normalize the image data to range [-1, 1]
    # reorder_channel "0 1 2" will keep the color channel, "2 1 0" will swap the R and B channel,
    # i.e. if the input is BGR loaded by cv2.imread, it will convert it to RGB for the model input.
    # need_horizontal_merge is suggested for inception models (v1/v3/v4).
    rknn.config(
        channel_mean_value="128 128 128 128",
        reorder_channel="0 1 2",
        need_horizontal_merge=True,
        quantized_dtype="asymmetric_quantized-u8",
    )

    # Load tensorflow model
    print("--> Loading model")
    ret = rknn.load_tensorflow(
        tf_pb="./model/frozen_model.pb",
        inputs=INPUT_NODE,
        outputs=OUTPUT_NODE,
        input_size_list=[[img_height, img_height, 3]],
    )
    if ret != 0:
        print("Load inception_v3 failed!")
        exit(ret)

    # Build model
    print("--> Building model")
    # dataset: A input data set for rectifying quantization parameters.
    ret = rknn.build(do_quantization=True, dataset="./dataset.txt")
    if ret != 0:
        print("Build inception_v3 failed!")
        exit(ret)

    # Export rknn model
import cv2
import numpy as np
from rknn.api import RKNN

if __name__ == '__main__':

    rknn = RKNN(verbose=False)

    # Register the custom ResizeArea op plugin before loading the graph
    # that uses it.
    rknn.register_op('./resize_area/ResizeArea.rknnop')

    rknn.load_tensorflow(tf_pb='./resize_area_test.pb',
                         inputs=['input'],
                         outputs=['resize_area_0'],
                         input_size_list=[[32, 32, 3]])
    rknn.build(do_quantization=False)
    # rknn.export_rknn('./resize_area.rknn')

    # rknn.load_rknn('./resize_area.rknn')

    # Initialize the runtime for inference.
    rknn.init_runtime()

    img = cv2.imread('./dog_32x32.jpg')

    outs = rknn.inference(inputs=[img])

    # The graph's resize op upscales 32x32 -> 64x64; save the result.
    out_img = outs[0].astype('uint8')
    out_img = np.reshape(out_img, (64, 64, 3))
    cv2.imwrite('./out.jpg', out_img)

コード例 #24
0
ファイル: step1.py プロジェクト: jack16888/rknn-toolkit
    rknn = RKNN()

    # Set model config
    print('--> Config model')
    rknn.config(mean_values=[[127.5, 127.5, 127.5]],
                std_values=[[127.5, 127.5, 127.5]],
                reorder_channel='0 1 2',
                quantized_dtype='asymmetric_quantized-u8',
                batch_size=16)
    print('done')

    # Load tensorflow model
    print('--> Loading model')
    ret = rknn.load_tensorflow(
        tf_pb='./ssd_mobilenet_v2.pb',
        inputs=['FeatureExtractor/MobilenetV2/MobilenetV2/input'],
        outputs=['concat_1', 'concat'],
        input_size_list=[[300, 300, 3]],
        predef_file=None)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Hybrid quantization step1
    print('--> hybrid_quantization_step1')
    ret = rknn.hybrid_quantization_step1(dataset='./dataset.txt')
    if ret != 0:
        print('hybrid_quantization_step1 failed!')
        exit(ret)
    print('done')
コード例 #25
0
fingerprint_size = fingerprint_width * spectrogram_length


# 메인 함수가 시작됩니다
if __name__ == '__main__':

	rknn = RKNN()
	print('--> Loading model')
	
	#rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')


	# Load TensorFlow Model
	print('--> Loading model')
	rknn.load_tensorflow(tf_pb='./freeze_conv.pb',
                     inputs=['fingerprint_input'],
                     outputs=['ArgMax'],
                     input_size_list=[[3920]])
	print('done')

	# Build Model
	print('--> Building model')
	rknn.build(do_quantization=False)
	print('done')

	# Export RKNN Model
	rknn.export_rknn('./CNN_RKNN.rknn')
	
	# Direct Load RKNN Model
	rknn.load_rknn('./CNN_RKNN.rknn')

	# init runtime environment