def rknn_convert(input_model, output_model, dataset_file, target_platform):
    """Convert an ONNX model into a quantized, pre-compiled RKNN model.

    Args:
        input_model: path to the source .onnx file.
        output_model: destination path for the exported .rknn file.
        dataset_file: calibration dataset list used for quantization.
        target_platform: RKNN target platform string (e.g. 'rk1808').
    """
    converter = RKNN()

    def _abort_if(code, message):
        # Mirror the conversion pipeline's error handling:
        # report the failing stage and terminate with the API return code.
        if code != 0:
            print(message)
            exit(code)

    print('--> config model')
    converter.config(channel_mean_value='127.5 127.5 127.5 127.5',
                     reorder_channel='0 1 2',
                     batch_size=1,
                     target_platform=target_platform)

    print('--> Loading model')
    _abort_if(converter.load_onnx(model=input_model), 'Load failed!')

    print('--> Building model')
    _abort_if(converter.build(do_quantization=True,
                              dataset=dataset_file,
                              pre_compile=True),
              'Build failed!')

    print('--> Export RKNN model')
    _abort_if(converter.export_rknn(output_model), 'Export .rknn failed!')

    # Release RKNN object
    converter.release()
def transfer(pb_path, rknn_name):
    """Convert a frozen TensorFlow graph (.pb) into an RKNN model file.

    Args:
        pb_path: path to the frozen TensorFlow graph.
        rknn_name: output path for the exported .rknn model.

    Fix: the original called init_runtime() before any model was loaded or
    built, which the RKNN API does not support (a model must be built or
    loaded first); the call is now made after build(). Return codes of
    load/build/export were also silently ignored and are now checked.
    """
    # Create RKNN object
    rknn = RKNN()

    # Config for Model Input PreProcess (defaults: no mean/channel changes)
    rknn.config()

    # Load TensorFlow Model
    # NOTE(review): assumes INPUT_WIDTH/INPUT_HEIGHT are module-level
    # constants and the graph has nodes 'Reshape' -> 'probability'.
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['Reshape'],
                               outputs=['probability'],
                               input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load model failed')
        exit(ret)
    print('done')

    # Build Model (no quantization)
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build model failed')
        exit(ret)
    print('done')

    # init runtime environment — valid only after build()/load_rknn()
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Export RKNN Model
    ret = rknn.export_rknn(rknn_name)
    if ret != 0:
        print('Export rknn failed')
        exit(ret)

    # Release RKNN Context
    rknn.release()
def convert_to_rknn():
    """Convert lprnet.onnx into ./lprnet.rknn (float, pre-compiled).

    Fix: the RKNN context was never released at the end; rknn.release()
    is now called so converter resources are freed.
    """
    from rknn.api import RKNN
    # Create RKNN object
    rknn = RKNN(verbose=True)
    # pre-process config: mean 127.5 / scale 128, keep channel order
    print('--> config model')
    rknn.config(channel_mean_value='127.5 127.5 127.5 128', reorder_channel='0 1 2')
    print('done')
    # Load onnx model
    print('--> Loading model')
    ret = rknn.load_onnx(model='lprnet.onnx')
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')
    # Build model (not quantized; dataset is still passed but unused
    # when do_quantization=False)
    print('--> Building model')
    ret = rknn.build(do_quantization=False, pre_compile=True, dataset='./data/dataset.txt')
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')
    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./lprnet.rknn')
    if ret != 0:
        print('Export model failed!')
        exit(ret)
    print('done')
    # Release RKNN context (was missing)
    rknn.release()
def quantify_transfer(pb_name, dataset_name, export_name):
    """Quantize a TensorFlow .pb model and export it as a .rknn file.

    Args:
        pb_name: path to the frozen TensorFlow graph.
        dataset_name: calibration dataset list for quantization.
        export_name: output .rknn path.

    Returns:
        0 on success, otherwise the failing RKNN API return code.

    Fix: build() and export_rknn() return values were ignored, so a failed
    build still exported and the function returned 0; both are now checked.
    """
    ret = 0
    print(pb_name, dataset_name, export_name)
    rknn = RKNN()
    # Empty mean/reorder strings leave the input untouched; quantize to
    # dynamic fixed point 8-bit.
    rknn.config(channel_mean_value='', reorder_channel='', quantized_dtype='dynamic_fixed_point-8')
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_name,
                               inputs=['test/x'],
                               outputs=['test/hypothesis'],
                               input_size_list=[[1, 4]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=dataset_name)
    if ret != 0:
        print('build error')
        rknn.release()
        return ret
    print('done')
    # Export and save the rknn model file
    ret = rknn.export_rknn(export_name)
    if ret != 0:
        print('export_rknn error')
    # Release RKNN Context
    rknn.release()
    return ret
def quanlization_darknet_model(model_path, weight_path, dataset_txt, is_quantization=True, pre_compile=False):
    """Convert a darknet model (.cfg + .weights) to RKNN, optionally quantized.

    The exported file is written under the module-level `model_dir`, named
    after the cfg file, with a `_quan` suffix when quantized.

    Args:
        model_path: path to the darknet .cfg file.
        weight_path: path to the darknet .weights file.
        dataset_txt: calibration dataset list (used only when quantizing).
        is_quantization: quantize the model when True.
        pre_compile: pre-compile for the target NPU (quantized path only).

    Fixes: the conversion previously continued after a failed load_darknet;
    it now releases the context and returns early. The duplicated save-path
    expression is computed once.
    """
    # Create the RKNN object (verbose log goes to verbose.log)
    rknn = RKNN(verbose=True, verbose_file='verbose.log')
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=4)

    flag = rknn.load_darknet(model=model_path, weight=weight_path)
    if flag == 0:
        print('load_darknet success')
    else:
        print('load_darknet failure')
        rknn.release()
        return
    print('done')

    # Quantization shrinks the model and speeds up inference at the cost
    # of some accuracy; do_quantization=False skips it.
    print('--> Building model')
    print(os.getcwd())
    # Base name of the cfg file, without directory or extension.
    base_name = model_path.split('/')[-1].split('.')[0]
    if is_quantization == True:
        flag = rknn.build(do_quantization=True, dataset=dataset_txt, pre_compile=pre_compile)
        print('do quantization ')
        save_rknn_path = model_dir + '/' + base_name + '_quan.rknn'
    else:
        flag = rknn.build(do_quantization=False)
        print('not do quantization')
        save_rknn_path = model_dir + '/' + base_name + '.rknn'
    if flag == 0:
        print('build success')
    else:
        print('build failure')
    print('done')

    flag = rknn.export_rknn(save_rknn_path)
    if flag == 0:
        print('export success')
    else:
        print('export failure')
    print('done')
    print(save_rknn_path)

    # Release RKNN Context
    rknn.release()
    print('save_rknn_path:', save_rknn_path)
def to_rknn(pb_path, rknn_path):
    """Convert a frozen TensorFlow graph to a pre-compiled RKNN model.

    Args:
        pb_path: path to the frozen .pb graph.
        rknn_path: output .rknn path.

    Fix: load/build/export return codes were silently ignored and the RKNN
    context was never released; both are addressed.
    """
    rknn = RKNN(verbose=True)
    # Mean 0 / scale 1: pass pixel values through unchanged.
    rknn.config(channel_mean_value='0 0 0 1', reorder_channel='0 1 2')
    # NOTE(review): assumes INPUT_HEIGHT/INPUT_WIDTH are module-level
    # constants and the graph has nodes 'Placeholder' -> 'ConvPred/ConvPred'.
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['Placeholder'],
                               outputs=['ConvPred/ConvPred'],
                               input_size_list=[[INPUT_HEIGHT, INPUT_WIDTH, 3]])
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')
    print('--> Building model')
    ret = rknn.build(do_quantization=False, pre_compile=True)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')
    ret = rknn.export_rknn(rknn_path)
    if ret != 0:
        print('Export model failed!')
        exit(ret)
    rknn.release()
def transfer(pb_path, rknn_name):
    """Convert a TensorFlow .pb model to an RKNN model with 8-bit dynamic
    fixed-point quantized dtype (no calibration quantization is performed).

    Args:
        pb_path: path to the frozen TensorFlow graph.
        rknn_name: output path for the exported .rknn model.
    """
    # Create the RKNN object
    #rknn = RKNN(verbose=True, verbose_file='./mini_XCEPTION_build.log')
    rknn = RKNN()

    # Configure model input preprocessing done by the NPU.
    # With channel_mean_value='0 0 0 255' the model would compute
    # (R-0)/255, (G-0)/255, (B-0)/255 at inference time (mean/normalize
    # handled automatically by the RKNN model).
    # reorder_channel='0 1 2' keeps the image channel order unchanged;
    # '2 1 0' would swap channels 0 and 2 (RGB <-> BGR).
    #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    rknn.config(quantized_dtype='dynamic_fixed_point-8')

    # Load the TensorFlow model:
    # tf_pb is the model to convert, inputs/outputs name the graph's
    # input and output nodes, input_size_list gives the input size.
    # NOTE(review): assumes INPUT_WIDTH/INPUT_HEIGHT are module-level
    # constants — confirm against the caller.
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['input_1'],
                               outputs=['predictions/Softmax'],
                               input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load Model failed!')
        exit(ret)
    print('done')

    # Build the model. do_quantization=False skips quantization;
    # quantization reduces model size and speeds up inference at the
    # cost of some accuracy.
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build Model failed!')
        exit(ret)
    print('done')

    # Export and save the rknn model file
    print('--> Export RKNN model')
    ret = rknn.export_rknn(rknn_name)
    if ret != 0:
        print('Export Model failed!')
        exit(ret)
    print('done')

    # Release RKNN Context
    rknn.release()
def caffe2rknn(caffe_proto, caffe_weight, rknn_model):
    """Convert a Caffe model (prototxt + caffemodel) to an RKNN model.

    Args:
        caffe_proto: path to the deploy prototxt.
        caffe_weight: path to the .caffemodel blobs.
        rknn_model: output .rknn path.

    Fixes: config() is now called before load_caffe() (the order documented
    by the RKNN-Toolkit API, so preprocessing settings apply to the loaded
    model), and the previously ignored return codes are checked. The RKNN
    context is released on success.
    """
    print("start export")
    rknn = RKNN(verbose=True)
    # Normalize with mean 127.5 / scale 128, swap channels (RGB <-> BGR).
    rknn.config(channel_mean_value='127.5 127.5 127.5 128.0',
                reorder_channel='2 1 0')
    ret = rknn.load_caffe(model=caffe_proto, proto="caffe", blobs=caffe_weight)
    if ret != 0:
        print('load_caffe failed!')
        exit(ret)
    # Float model (no quantization).
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('build failed!')
        exit(ret)
    ret = rknn.export_rknn(export_path=rknn_model)
    if ret != 0:
        print('export_rknn failed!')
        exit(ret)
    rknn.release()
    print("export finished")
def to_rknn(pb_path, rknn_path):
    """Export the LaneNet TensorFlow graph at pb_path as an RKNN model.

    Args:
        pb_path: path to the frozen LaneNet .pb graph.
        rknn_path: output .rknn path.
    """
    output_nodes = [
        #'lanenet_model/vgg_backend/binary_seg/ArgMax',
        # Workaround RKNN 1.1.0 bug
        'lanenet_model/vgg_frontend/vgg16_decode_module/binary_seg_decode/binary_final_logits/binary_final_logits',
        #'lanenet_model/vgg_backend/binary_seg/Softmax',
        'lanenet_model/vgg_backend/instance_seg/pix_embedding_conv/pix_embedding_conv',
    ]
    converter = RKNN(verbose=True)
    # Normalize inputs with mean/scale 127.5 and swap RGB <-> BGR.
    converter.config(channel_mean_value='127.5 127.5 127.5 127.5',
                     reorder_channel='2 1 0')
    converter.load_tensorflow(tf_pb=pb_path,
                              inputs=['input_tensor'],
                              outputs=output_nodes,
                              input_size_list=[[256, 512, 3]])
    converter.build(do_quantization=False, dataset='./dataset.txt')
    converter.export_rknn(rknn_path)
def save_rknn(self, rknnpath, verbose=True, verbose_file=None, input_mean_value='0 0 0 1', input_channels='0 1 2', do_quantization=True, pre_compile=True):
    """Freeze this model to a temporary .pb and convert it to an RKNN file.

    Args:
        rknnpath: destination path of the exported .rknn model.
        verbose, verbose_file: forwarded to the RKNN constructor.
        input_mean_value: channel_mean_value string for rknn.config().
        input_channels: reorder_channel string for rknn.config().
        do_quantization: quantize using ./rknn_quantization.txt when True.
        pre_compile: pre-compile the model for the target NPU.
    """
    TMP_PB_PATH = './tmp.pb'
    from rknn.api import RKNN

    # First serialize the graph; the RKNN toolkit converts from the .pb.
    self.save_pb(TMP_PB_PATH)
    converter = RKNN(verbose=verbose, verbose_file=verbose_file)

    print('--> config model')
    converter.config(channel_mean_value=input_mean_value,
                     reorder_channel=input_channels)
    print('done')

    print('--> Loading pb, input shape = ' + str([self.__input_shape]))
    status = converter.load_tensorflow(tf_pb=TMP_PB_PATH,
                                       inputs=[self.input.op.name],
                                       outputs=[self.output.op.name],
                                       input_size_list=[list(self.__input_shape)])
    if status != 0:
        print('Load pb failed! Ret = {}'.format(status))
        exit(status)
    print('done')

    print('--> Building model')
    status = converter.build(do_quantization=do_quantization,
                             dataset='./rknn_quantization.txt',
                             pre_compile=pre_compile)
    if status != 0:
        print('Build model failed!')
        exit(status)
    print('done')

    print('--> Export RKNN model')
    status = converter.export_rknn(rknnpath)
    if status != 0:
        print('Export rknn failed!')
        exit(status)
    print('done')
    converter.release()
def rknn_convert(input_model, output_model, model_input_shape, output_tensor_num, dataset_file, target_platform):
    """Convert a multi-head TensorFlow .pb into a quantized, pre-compiled
    RKNN model.

    Args:
        input_model: path to the frozen .pb graph.
        output_model: destination .rknn path.
        model_input_shape: (height, width) tuple; channels (3) appended here.
        output_tensor_num: number of prediction heads (1, 2 or 3).
        dataset_file: calibration dataset list for quantization.
        target_platform: RKNN target platform string.

    Raises:
        ValueError: if output_tensor_num is not 1, 2 or 3.
    """
    # Output tensor names keyed by how many prediction heads the graph has.
    head_outputs = {
        1: ['predict_conv/BiasAdd'],
        2: ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd'],
        3: ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd', 'predict_conv_3/BiasAdd'],
    }

    converter = RKNN()
    print('--> config model')
    converter.config(channel_mean_value='0 0 0 255',
                     reorder_channel='0 1 2',
                     batch_size=1,
                     target_platform=target_platform)

    print('--> Loading model')
    if output_tensor_num not in head_outputs:
        raise ValueError('invalid output tensor number ', output_tensor_num)
    output_tensor_names = head_outputs[output_tensor_num]
    status = converter.load_tensorflow(tf_pb=input_model,
                                       inputs=['image_input'],
                                       outputs=output_tensor_names,
                                       input_size_list=[model_input_shape + (3,)],
                                       predef_file=None)
    if status != 0:
        print('Load failed!')
        exit(status)

    print('--> Building model')
    status = converter.build(do_quantization=True, dataset=dataset_file, pre_compile=True)
    if status != 0:
        print('Build failed!')
        exit(status)

    print('--> Export RKNN model')
    status = converter.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    converter.release()
def keras_to_rknn(cfg_path, h5_path, darknet_path, rknn_path, dataset_path, flag):
    """Convert a Keras .h5 model to .rknn via an intermediate darknet
    .weights file.

    Args:
        cfg_path: darknet .cfg describing the network.
        h5_path: Keras .h5 weights to convert.
        darknet_path: where the intermediate .weights file is written.
        rknn_path: output .rknn path.
        dataset_path: calibration dataset list (used when flag is True).
        flag: quantize the model when truthy.
    """
    # ------------------------------------------------------#
    # h5 -> darknet, saved as .weights
    # NOTE(review): relies on project-local KerasParser; presumably
    # block_gen yields layer blocks in network order — confirm.
    # ------------------------------------------------------#
    keras_loader = KerasParser(cfg_path, h5_path, darknet_path)
    for block in keras_loader.block_gen:
        # Only convolutional blocks carry weights to convert.
        if 'convolutional' in block['type']:
            keras_loader.conv(block)
    keras_loader.close()

    # ------------------------------------------------------#
    # darknet -> rknn, saved as .rknn
    # ------------------------------------------------------#
    # Create RKNN object
    rknn = RKNN()

    # Load the darknet model
    print('--> Loading model')
    rknn.load_darknet(model=cfg_path, weight=darknet_path)
    print('done')

    # Scale inputs by 1/255, keep channel order.
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=1)

    # Build model (quantized when flag is set; always pre-compiled)
    print('--> Building model')
    if flag:
        rknn.build(do_quantization=True, dataset=dataset_path, pre_compile=True)
    else:
        rknn.build(do_quantization=False, pre_compile=True)
    print('done')

    # export model
    print('--> Export model')
    rknn.export_rknn(rknn_path)
    print('done')
def rknn_convert(input_model, output_model, model_input_shape, dataset_file, target_platform):
    """Convert a classifier .pb (image_input -> dense/Softmax) into a
    quantized, pre-compiled RKNN model.

    Args:
        input_model: path to the frozen .pb graph.
        output_model: destination .rknn path.
        model_input_shape: (height, width) tuple; channels (3) appended here.
        dataset_file: calibration dataset list for quantization.
        target_platform: RKNN target platform string.
    """
    converter = RKNN()

    print('--> config model')
    # Scale pixel values by 1/255, keep channel order.
    converter.config(channel_mean_value='0 0 0 255',
                     reorder_channel='0 1 2',
                     batch_size=1,
                     target_platform=target_platform)

    print('--> Loading model')
    status = converter.load_tensorflow(tf_pb=input_model,
                                       inputs=['image_input'],
                                       outputs=['dense/Softmax'],
                                       input_size_list=[model_input_shape + (3, )],
                                       predef_file=None)
    if status != 0:
        print('Load failed!')
        exit(status)

    print('--> Building model')
    status = converter.build(do_quantization=True, dataset=dataset_file, pre_compile=True)
    if status != 0:
        print('Build failed!')
        exit(status)

    print('--> Export RKNN model')
    status = converter.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    converter.release()
from rknn.api import RKNN

# Script: convert darknet YOLOv3 weights to a quantized RKNN model.
if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()

    # Load the darknet model (tiny variant kept commented out).
    # NOTE(review): config() is called after load_darknet() here; the
    # other converters in this codebase call config() first — verify the
    # preprocessing settings are actually applied.
    print('--> Loading model')
    rknn.load_darknet(model='./yolov3.cfg', weight="./yolov3.weights")
    #rknn.load_darknet(model='./yolov3-tiny.cfg', weight="./yolov3-tiny.weights")
    print('done')

    # Scale pixels by 1/255, keep channel order.
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=1)

    # Build quantized model against the 608x608 calibration set.
    print('--> Building model')
    #rknn.build(do_quantization=True, dataset='./dataset.txt')
    rknn.build(do_quantization=True, dataset='./dataset_608x608.txt')
    print('done')

    #rknn.export_rknn('./yolov3_tiny.rknn')
    rknn.export_rknn('./yolov3.rknn')

    #rknn.load_rknn('./yolov3.rknn')
    #image = Image.open('./dog.jpg').resize((416, 416))
    #rknn.eval_perf(inputs=[image], is_print=True)
def show_perfs(perfs): perfs = 'perfs: {}\n'.format(outputs) print(perfs) if __name__ == '__main__': # Create RKNN object rknn = RKNN(verbose=False) # pre-process config print('--> config model') rknn.config(channel_mean_value='103.94 116.78 123.68 58.82', reorder_channel='0 1 2', quantized_dtype='asymmetric_quantized-u8') print('done') # Load tensorflow model print('--> Loading model') ret = rknn.load_tflite( model='../../tflite/mobilenet_v1/mobilenet_v1.tflite') if ret != 0: print('Load mobilenet_v1 failed!') exit(ret) print('done') # Build model print('--> Building model') ret = rknn.build(do_quantization=True,
from rknn.api import RKNN if __name__ == '__main__': # Create RKNN object rknn = RKNN() # Set model config print('--> Config model') rknn.config(mean_values=[[127.5, 127.5, 127.5]], std_values=[[127.5, 127.5, 127.5]], reorder_channel='0 1 2', batch_size=16) print('done') # Hybrid quantization step2 print('--> hybrid_quantization_step2') ret = rknn.hybrid_quantization_step2( model_input='./ssd_mobilenet_v2.json', data_input='./ssd_mobilenet_v2.data', model_quantization_cfg='./ssd_mobilenet_v2.quantization.cfg', dataset='./dataset.txt') if ret != 0: print('hybrid_quantization_step2 failed!') exit(ret) print('done') # Export RKNN model print('--> Export RKNN model') ret = rknn.export_rknn('./ssd_mobilenet_v2.rknn') if ret != 0:
import numpy as np
import cv2
from rknn.api import RKNN

# Script: load the SSD-MobilenetV2 TensorFlow graph for RKNN conversion.
if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()

    # Set model config: normalize inputs with mean/std 127.5 (range ~[-1, 1]),
    # keep channel order, asymmetric u8 quantization, batch 16.
    print('--> Config model')
    rknn.config(mean_values=[[127.5, 127.5, 127.5]],
                std_values=[[127.5, 127.5, 127.5]],
                reorder_channel='0 1 2',
                quantized_dtype='asymmetric_quantized-u8',
                batch_size=16)
    print('done')

    # Load tensorflow model; input is 300x300x3.
    # NOTE(review): 'concat_1'/'concat' are presumably the detection
    # box/score output tensors — confirm against the graph.
    print('--> Loading model')
    ret = rknn.load_tensorflow(
        tf_pb='./ssd_mobilenet_v2.pb',
        inputs=['FeatureExtractor/MobilenetV2/MobilenetV2/input'],
        outputs=['concat_1', 'concat'],
        input_size_list=[[300, 300, 3]],
        predef_file=None)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')
# Pick the settings module for the requested YOLO variant (default: tiny)
# and install it as settings.py before importing from it below.
yolo_type = 'tiny' if len(sys.argv) < 2 else sys.argv[1]
shutil.copy("settings_%s.py" % yolo_type, "settings.py")

if __name__ == '__main__':
    from rknn.api import RKNN
    from timeit import default_timer as timer
    from settings import yolov3_weights, yolov3_model_cfg, rknn_model, \
        pre_compile

    # Create RKNN object; timers measure total and build durations.
    total_timer = timer()
    rknn = RKNN()

    # pre-process config: subtract ImageNet means, scale by 1/58.82,
    # keep channel order.
    print('--> config model')
    rknn.config(channel_mean_value='103.94 116.78 123.68 58.82',
                reorder_channel='0 1 2')
    print('done')

    # Load darknet model
    print('--> Loading model')
    ret = rknn.load_darknet(model=yolov3_model_cfg, weight=yolov3_weights)
    if ret != 0:
        raise Exception('Load darknet yolov3 failed!')
    print('done')

    # Build model (float, no quantization)
    print('--> Building model')
    build_timer = timer()
    # ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    # do_quantization: whether to quantize the model; value is True or False.
    ret = rknn.build(do_quantization=False, pre_compile=pre_compile)
w = max(0, xmax - xmin) h = max(0, ymax - ymin) area = w * h if area > 0: return True return False if __name__ == "__main__": VIDEO_PATH = sys.argv[1] # Create RKNN object rknn = RKNN(verbose=True) # pre-process config print('--> config model') rknn.config(reorder_channel='0 1 2', channel_mean_value='0 0 0 255') print('done') if NEED_BUILD_MODEL: # Load pytorch model print('--> Loading model {}'.format(MODEL)) ret = rknn.load_darknet(model='/home/labasus/DataDisk/luqiao/deep_sort_yolov3/yolov3.cfg', weight=MODEL) # ret = rknn.load_onnx(model=MODEL) if ret != 0: print('Load model failed!') exit(ret) print('done') # Build model print('--> Building model') ret = rknn.build(do_quantization=True, dataset='./dataset.txt') if ret != 0:
ONNX_MODEL = './onnx_models/yolov5s_rm_transpose.onnx' # platform="rk1808" platform = "rv1109" RKNN_MODEL = 'yolov5s_relu_{}_out_opt.rknn'.format(platform) if __name__ == '__main__': add_perm = False # 如果设置成True,则将模型输入layout修改成NHWC # Create RKNN object rknn = RKNN(verbose=True) # pre-process config print('--> config model') rknn.config(batch_size=1, mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], reorder_channel='0 1 2', target_platform=[platform], force_builtin_perm=add_perm, output_optimize=1) print('done') # Load tensorflow model print('--> Loading model') ret = rknn.load_onnx(model=ONNX_MODEL) if ret != 0: print('Load resnet50v2 failed!') exit(ret) print('done') # Build model print('--> Building model') ret = rknn.build(do_quantization=True, dataset='./dataset.txt') if ret != 0: print('Build resnet50 failed!')
import numpy as np
import cv2
from rknn.api import RKNN

# Script: convert the ONet caffe model (presumably MTCNN's ONet — confirm)
# to a quantized RKNN model.
if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()

    # pre-process config: mean 127.5 / scale 128 (inputs ~[-1, 1]),
    # swap RGB <-> BGR, quantize to dynamic fixed point 8-bit.
    print('--> config model')
    rknn.config(channel_mean_value='127.5 127.5 127.5 128',
                reorder_channel='2 1 0',
                quantized_dtype='dynamic_fixed_point-8')
    print('done')

    # Load caffe model
    print('--> Loading model')
    ret = rknn.load_caffe(model='./ONet.prototxt',
                          proto='caffe',
                          blobs='./ONet.caffemodel')
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build quantized model against the ONet calibration dataset
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset_onet.txt')
    if ret != 0:
        print('Build model failed!')
b[idx] = label_colors[l, 2] rgb = np.stack([r, g, b], axis=2) return rgb if __name__ == '__main__': export_mxnet_model() # Create RKNN object rknn = RKNN() # pre-process config print('--> Config model') rknn.config(mean_values=[[123.675, 116.28, 103.53]], std_values=[[57.63, 57.63, 57.63]], reorder_channel='0 1 2') print('done') # Load mxnet model symbol = './fcn_resnet101_voc-symbol.json' params = './fcn_resnet101_voc-0000.params' input_size_list = [[3, 480, 480]] print('--> Loading model') ret = rknn.load_mxnet(symbol, params, input_size_list) if ret != 0: print('Load mxnet model failed!') exit(ret) print('done') # Build model print('--> Building model')
import numpy as np
import cv2
from rknn.api import RKNN

# Script: convert a caffe model (deploy.prototxt + snapshot weights)
# to a quantized RKNN model.
if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()

    # pre-process config: no mean subtraction / unit scale, swap RGB <-> BGR
    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 1', reorder_channel='2 1 0')
    print('done')

    # Load caffe model
    print('--> Loading model')
    ret = rknn.load_caffe(model='./deploy.prototxt',
                          proto='caffe',
                          blobs='./solver_iter_45.caffemodel')
    if ret != 0:
        print('Load interp_test failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # Build quantized model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    if ret != 0:
        print('Build interp_test failed!')
        exit(ret)
    print('done')
return np.exp(x)/sum(np.exp(x)) if __name__ == '__main__': export_pytorch_model() model = './resnet18.pt' input_size_list = [[3,224,224]] # Create RKNN object rknn = RKNN() # pre-process config print('--> config model') rknn.config(channel_mean_value='123.675 116.28 103.53 58.395', reorder_channel='0 1 2') print('done') # Load pytorch model print('--> Loading model') ret = rknn.load_pytorch(model=model, input_size_list=input_size_list) if ret != 0: print('Load pytorch model failed!') exit(ret) print('done') # Build model print('--> Building model') ret = rknn.build(do_quantization=True, dataset='./dataset.txt') if ret != 0: print('Build pytorch failed!')
import numpy as np
import cv2
from rknn.api import RKNN

# Script: same conversion as the channel_mean_value variant, expressed with
# the mean_values/std_values config style instead.
if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()

    # pre-process config: zero mean, unit std, swap RGB <-> BGR
    print('--> config model')
    rknn.config(mean_values=[[0, 0, 0]],
                std_values=[[1, 1, 1]],
                reorder_channel='2 1 0')
    print('done')

    # Load caffe model
    print('--> Loading model')
    ret = rknn.load_caffe(model='./deploy.prototxt',
                          proto='caffe',
                          blobs='./solver_iter_45.caffemodel')
    if ret != 0:
        print('Load interp_test failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # Build quantized model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
    if ret != 0:
        print('Build interp_test failed!')
if value > 0: topi = '{}: {}\n'.format(index[j], value) else: topi = '-1: 0.0\n' top5_str += topi print(top5_str) if __name__ == '__main__': # Create RKNN object rknn = RKNN() # pre-process config print('--> config model') rknn.config(channel_mean_value='127.5 127.5 127.5 127.5', reorder_channel='0 1 2') print('done') # Load tensorflow model print('--> Loading model') ret = rknn.load_tflite(model='./mobilenet_v1.tflite') if ret != 0: print('Load mobilenet_v1 failed!') exit(ret) print('done') # Build model print('--> Building model') ret = rknn.build(do_quantization=True, dataset='./dataset.txt', pre_compile=False) if ret != 0: print('Build mobilenet_v1 failed!')
# Script: convert the VGG SSD 300x300 caffe model to RKNN.
if __name__ == '__main__':
    # Bail out with download instructions if the caffemodel is missing.
    if not os.path.exists('./VGG_VOC0712_SSD_300x300_iter_120000.caffemodel'):
        print('!!! Missing VGG_VOC0712_SSD_300x300_iter_120000.caffemodel !!!\n' \
              '1. Download models_VGGNet_VOC0712_SSD_300x300.tar.gz from https://drive.google.com/file/d/0BzKzrI_SkD1_WVVTSmQxU0dVRzA/view\n' \
              '2. Extract the VGG_VOC0712_SSD_300x300_iter_120000.caffemodel from models_VGGNet_VOC0712_SSD_300x300.tar.gz\n' \
              '3. Or you can also download caffemodel from https://eyun.baidu.com/s/3jJhPRzo , password is rknn\n')
        exit(-1)

    # Create RKNN object
    rknn = RKNN(verbose=False)

    # Set model config: subtract per-channel means, unit std,
    # swap RGB <-> BGR (caffe models expect BGR input).
    print('--> Config model')
    rknn.config(mean_values=[[103.94, 116.78, 123.68]],
                std_values=[[1, 1, 1]],
                reorder_channel='2 1 0')
    print('done')

    # Load caffe model (prototxt with the DetectionOutput layer removed)
    print('--> Loading model')
    ret = rknn.load_caffe(
        model='./deploy_rm_detection_output.prototxt',
        proto='caffe',
        blobs='./VGG_VOC0712_SSD_300x300_iter_120000.caffemodel')
    if ret != 0:
        print('Load model failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # Build model
def convert_model(model_path, out_path, pre_compile):
    """Convert every model listed in a model_config.yml into .rknn files.

    Args:
        model_path: directory containing model_config.yml (or a direct path
            to the yaml file itself).
        out_path: directory where the .rknn files are written.
        pre_compile: pre-compile flag forwarded to rknn.build().

    Returns:
        List of exported .rknn file paths.

    Fixes: the missing-config message used a bare '%' instead of '%s'
    (TypeError at print time); an unsupported platform previously fell
    through and built an empty RKNN object — it now releases the context
    and skips that model; the RKNN context is released after each export.
    """
    if os.path.isfile(model_path):
        # A yaml file was passed directly; its directory is the model root.
        yaml_config_file = model_path
        model_path = os.path.dirname(yaml_config_file)
    else:
        yaml_config_file = os.path.join(model_path, 'model_config.yml')
        if not os.path.exists(yaml_config_file):
            print('model config %s not exist!' % yaml_config_file)
            exit(-1)
    model_configs = parse_model_config(yaml_config_file)
    exported_rknn_model_path_list = []
    for model_name in model_configs['models']:
        model = model_configs['models'][model_name]
        rknn = RKNN()
        rknn.config(**model['configs'])
        print('--> Loading model...')
        if model['platform'] == 'tensorflow':
            model_file_path = os.path.join(model_path, model['model_file_path'])
            # Input sizes come as comma-separated strings in the yaml.
            input_size_list = []
            for input_size_str in model['subgraphs']['input-size-list']:
                input_size = list(map(int, input_size_str.split(',')))
                input_size_list.append(input_size)
            rknn.load_tensorflow(tf_pb=model_file_path,
                                 inputs=model['subgraphs']['inputs'],
                                 outputs=model['subgraphs']['outputs'],
                                 input_size_list=input_size_list)
        elif model['platform'] == 'tflite':
            model_file_path = os.path.join(model_path, model['model_file_path'])
            rknn.load_tflite(model=model_file_path)
        elif model['platform'] == 'caffe':
            prototxt_file_path = os.path.join(model_path, model['prototxt_file_path'])
            caffemodel_file_path = os.path.join(model_path, model['caffemodel_file_path'])
            rknn.load_caffe(model=prototxt_file_path,
                            proto='caffe',
                            blobs=caffemodel_file_path)
        elif model['platform'] == 'onnx':
            model_file_path = os.path.join(model_path, model['model_file_path'])
            rknn.load_onnx(model=model_file_path)
        else:
            print("platform %s not support!" % (model['platform']))
            # Nothing was loaded; skip this model instead of building an
            # empty RKNN object.
            rknn.release()
            continue
        print('done')
        if model['quantize']:
            dataset_path = os.path.join(model_path, model['dataset'])
        else:
            # Placeholder path; unused when do_quantization is False.
            dataset_path = './dataset'
        print('--> Build RKNN model...')
        rknn.build(do_quantization=model['quantize'],
                   dataset=dataset_path,
                   pre_compile=pre_compile)
        print('done')
        export_rknn_model_path = "%s.rknn" % (os.path.join(out_path, model_name))
        print('--> Export RKNN model to: {}'.format(export_rknn_model_path))
        rknn.export_rknn(export_path=export_rknn_model_path)
        exported_rknn_model_path_list.append(export_rknn_model_path)
        rknn.release()
        print('done')
    return exported_rknn_model_path_list
else: topi = '-1: 0.0\n' top5_str += topi print(top5_str) if __name__ == '__main__': export_keras_model() # Create RKNN object rknn = RKNN() # pre-process config print('--> config model') rknn.config(mean_values=[[127.5, 127.5, 127.5]], std_values=[[127.5, 127.5, 127.5]], reorder_channel='0 1 2') print('done') # Load keras model print('--> Loading model') ret = rknn.load_keras(model=KERAS_MODEL_PATH) if ret != 0: print('Load keras model failed!') exit(ret) print('done') # Build model print('--> Building model') ret = rknn.build(do_quantization=True, dataset='./dataset.txt') if ret != 0: print('Build pytorch failed!')
default=1, help='batch size') opt = parser.parse_args() ONNX_MODEL = opt.onnx if opt.rknn: RKNN_MODEL = opt.rknn else: RKNN_MODEL = "%s.rknn" % os.path.splitext(ONNX_MODEL)[0] rknn = RKNN() print('--> config model') if opt.original: rknn.config(mean_values=[[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]], std_values=[[ 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0 ]], batch_size=opt.batch_size, target_platform="rk3399pro") # reorder_channel='0 1 2', else: rknn.config(channel_mean_value='0 0 0 255', reorder_channel='2 1 0', batch_size=opt.batch_size, target_platform="rk3399pro") # Load tensorflow model print('--> Loading model') ret = rknn.load_onnx(model=ONNX_MODEL) assert ret == 0, "Load onnx failed!" # Build model print('--> Building model')