def transfer(pb_path, rknn_name):
    """Convert a frozen TensorFlow graph to an RKNN model file.

    pb_path   -- path to the frozen .pb graph (input node 'Reshape',
                 output node 'probability').
    rknn_name -- path the converted .rknn model is written to.
    Relies on module-level INPUT_WIDTH / INPUT_HEIGHT for the input size.
    """
    # Create RKNN object
    rknn = RKNN()
    # Config for Model Input PreProcess (defaults; single-channel input)
    rknn.config()
    # Load TensorFlow Model
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['Reshape'],
                               outputs=['probability'],
                               input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret != 0:
        print('Load model failed')
        exit(ret)
    print('done')
    # Build Model
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build model failed')
        exit(ret)
    print('done')
    # BUGFIX: init_runtime() was originally called first, on a fresh RKNN
    # object with no model loaded or built; the runtime needs a built model,
    # so it is initialized only after build succeeds.
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
    # Export RKNN Model
    rknn.export_rknn(rknn_name)
    # Release RKNN Context
    rknn.release()
def quantify_transfer(pb_name, dataset_name, export_name):
    """Convert a frozen TensorFlow graph to a quantized RKNN model.

    pb_name      -- path to the frozen .pb graph (nodes 'test/x' -> 'test/hypothesis')
    dataset_name -- calibration dataset list used for quantization
    export_name  -- destination .rknn path
    Returns 0 on success, the failing RKNN return code otherwise.
    """
    print(pb_name, dataset_name, export_name)
    rknn = RKNN()
    # 8-bit dynamic fixed point quantization; no channel pre-processing.
    rknn.config(channel_mean_value='',
                reorder_channel='',
                quantized_dtype='dynamic_fixed_point-8')
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_name,
                               inputs=['test/x'],
                               outputs=['test/hypothesis'],
                               input_size_list=[[1, 4]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    # BUGFIX: build/export return codes were previously ignored, so the
    # function could report success after a failed conversion.
    ret = rknn.build(do_quantization=True, dataset=dataset_name)
    if ret != 0:
        print('build error')
        rknn.release()
        return ret
    print('done')
    # Export the RKNN model file.
    ret = rknn.export_rknn(export_name)
    if ret != 0:
        print('export_rknn error')
    # Release RKNN Context
    rknn.release()
    return ret
def common_transfer(pb_name, export_name):
    """Convert a frozen TensorFlow MNIST graph to an (unquantized) RKNN model.

    pb_name     -- path to the frozen .pb graph (nodes 'x' -> 'y_conv')
    export_name -- destination .rknn path
    Returns 0 on success, the failing RKNN return code otherwise.
    """
    ret = 0
    rknn = RKNN()
    # This step is not needed when dealing with grayscale images.
    # rknn.config(channel_mean_value='', reorder_channel='')
    print('--> Loading model')
    # BUGFIX: the pb_name parameter was ignored; './mnist_frozen_graph.pb'
    # was hard-coded instead.
    ret = rknn.load_tensorflow(
        tf_pb=pb_name,
        inputs=['x'],
        outputs=['y_conv'],
        input_size_list=[[28, 28, 1]])
    if ret != 0:
        print('load_tensorflow error')
        rknn.release()
        return ret
    print('done')
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('build error')
        rknn.release()
        return ret
    print('done')
    # BUGFIX: the export_name parameter was ignored; './mnist.rknn' was
    # hard-coded instead.
    ret = rknn.export_rknn(export_name)
    # Release RKNN Context
    rknn.release()
    return ret
def quanlization_darknet_model(model_path, weight_path, dataset_txt,
                               is_quantization=True, pre_compile=False):
    """Convert a darknet model (.cfg + .weights) to an RKNN model file.

    model_path   -- darknet .cfg file
    weight_path  -- darknet .weights file
    dataset_txt  -- calibration dataset list (used when quantizing)
    is_quantization -- quantize (smaller/faster, some accuracy loss) or not
    pre_compile  -- forwarded to rknn.build() when quantizing
    The output path is derived from the module-level `model_dir` and the
    cfg base name ('_quan.rknn' suffix when quantized).
    """
    # Create the RKNN conversion object with verbose logging.
    rknn = RKNN(verbose=True, verbose_file='verbose.log')
    # (x - 0) / 255 pre-processing, RGB channel order unchanged.
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=4)
    flag = rknn.load_darknet(model=model_path, weight=weight_path)
    if flag == 0:
        print('load_darknet success')
    else:
        # BUGFIX: previously the function kept going and tried to build an
        # unloaded model; abort instead.
        print('load_darknet failure')
        rknn.release()
        return
    print('done')
    print('--> Building model')
    print(os.getcwd())
    # Base name of the cfg file, shared by both output paths.
    base_name = model_path.split('/')[-1].split('.')[0]
    if is_quantization == True:
        flag = rknn.build(do_quantization=True,
                          dataset=dataset_txt,
                          pre_compile=pre_compile)
        print('do quantization ')
        save_rknn_path = model_dir + '/' + base_name + '_quan.rknn'
    else:
        flag = rknn.build(do_quantization=False)
        print('not do quantization')
        save_rknn_path = model_dir + '/' + base_name + '.rknn'
    if flag == 0:
        print('build success')
    else:
        # BUGFIX: abort on build failure instead of exporting a broken model.
        print('build failure')
        rknn.release()
        return
    print('done')
    flag = rknn.export_rknn(save_rknn_path)
    if flag == 0:
        print('export success')
    else:
        print('export failure')
    print('done')
    print(save_rknn_path)
    # Release RKNN Context
    rknn.release()
    print('save_rknn_path:', save_rknn_path)
def to_rknn(pb_path, rknn_path):
    """Convert a frozen TensorFlow graph to a pre-compiled RKNN model.

    pb_path   -- frozen .pb graph ('Placeholder' -> 'ConvPred/ConvPred')
    rknn_path -- destination .rknn path
    Relies on module-level INPUT_HEIGHT / INPUT_WIDTH for the input size.
    """
    rknn = RKNN(verbose=True)
    # (x - 0) / 1: pixel values passed through unchanged, RGB order kept.
    rknn.config(channel_mean_value='0 0 0 1', reorder_channel='0 1 2')
    # rknn.config(channel_mean_value='128 128 128 128', reorder_channel='0 1 2')
    print('--> Loading model')
    rknn.load_tensorflow(tf_pb=pb_path,
                         inputs=['Placeholder'],
                         outputs=['ConvPred/ConvPred'],
                         input_size_list=[[INPUT_HEIGHT, INPUT_WIDTH, 3]])
    print('done')
    print('--> Building model')
    rknn.build(do_quantization=False, pre_compile=True)
    print('done')
    rknn.export_rknn(rknn_path)
    # BUGFIX: the RKNN context was never released.
    rknn.release()
def convert_to_rknn():
    """Convert 'lprnet.onnx' to a pre-compiled './lprnet.rknn' model.

    Exits the process with the failing RKNN return code on any error.
    """
    from rknn.api import RKNN

    # Create RKNN object
    rknn = RKNN(verbose=True)

    # Pre-process config: (x - 127.5) / 128 per channel, RGB order kept.
    print('--> config model')
    rknn.config(channel_mean_value='127.5 127.5 127.5 128',
                reorder_channel='0 1 2')
    print('done')

    # Load onnx model
    print('--> Loading model')
    ret = rknn.load_onnx(model='lprnet.onnx')
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model (no quantization; dataset only listed for completeness)
    print('--> Building model')
    ret = rknn.build(do_quantization=False,
                     pre_compile=True,
                     dataset='./data/dataset.txt')
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./lprnet.rknn')
    if ret != 0:
        print('Export model failed!')
        exit(ret)
    print('done')

    # BUGFIX: the RKNN context was never released.
    rknn.release()
def rknn_convert(input_model, output_model, dataset_file, target_platform):
    """Convert an ONNX model into a quantized, pre-compiled RKNN model.

    input_model     -- path of the source .onnx file
    output_model    -- destination .rknn path
    dataset_file    -- calibration dataset list used for quantization
    target_platform -- RKNN target platform string
    Exits the process with the failing return code on any error.
    """
    rknn = RKNN()

    # Pre-processing: (x - 127.5) / 127.5 per channel, RGB order unchanged.
    print('--> config model')
    rknn.config(channel_mean_value='127.5 127.5 127.5 127.5',
                reorder_channel='0 1 2',
                batch_size=1,
                target_platform=target_platform)

    # Run load -> build -> export as a uniform pipeline; any non-zero
    # return code aborts the process with that code.
    pipeline = (
        ('--> Loading model',
         lambda: rknn.load_onnx(model=input_model),
         'Load failed!'),
        ('--> Building model',
         lambda: rknn.build(do_quantization=True,
                            dataset=dataset_file,
                            pre_compile=True),
         'Build failed!'),
        ('--> Export RKNN model',
         lambda: rknn.export_rknn(output_model),
         'Export .rknn failed!'),
    )
    for banner, step, error_msg in pipeline:
        print(banner)
        ret = step()
        if ret != 0:
            print(error_msg)
            exit(ret)

    # Release RKNN object
    rknn.release()
def to_rknn(pb_path, rknn_path):
    """Convert a frozen lanenet TensorFlow graph to an RKNN model.

    pb_path   -- frozen .pb graph with input 'input_tensor' (256x512x3)
    rknn_path -- destination .rknn path
    """
    rknn = RKNN(verbose=True)
    # (x - 127.5) / 127.5 per channel; '2 1 0' swaps RGB <-> BGR.
    rknn.config(channel_mean_value='127.5 127.5 127.5 127.5',
                reorder_channel='2 1 0')
    rknn.load_tensorflow(
        tf_pb=pb_path,
        inputs=['input_tensor'],
        outputs=[
            #'lanenet_model/vgg_backend/binary_seg/ArgMax',
            'lanenet_model/vgg_frontend/vgg16_decode_module/binary_seg_decode/binary_final_logits/binary_final_logits',  # Workaround RKNN 1.1.0 bug
            #'lanenet_model/vgg_backend/binary_seg/Softmax',
            'lanenet_model/vgg_backend/instance_seg/pix_embedding_conv/pix_embedding_conv'
        ],
        input_size_list=[[256, 512, 3]])
    rknn.build(do_quantization=False, dataset='./dataset.txt')
    rknn.export_rknn(rknn_path)
    # BUGFIX: the RKNN context was never released.
    rknn.release()
def keras_to_rknn(cfg_path, h5_path, darknet_path, rknn_path, dataset_path, flag):
    """Convert a Keras .h5 model to RKNN via the darknet format.

    Step 1: h5 -> darknet (.weights) using KerasParser.
    Step 2: darknet -> .rknn via the RKNN toolkit.

    cfg_path     -- darknet .cfg describing the network
    h5_path      -- source Keras weights
    darknet_path -- intermediate .weights output path
    rknn_path    -- final .rknn output path
    dataset_path -- calibration dataset list (used when flag is truthy)
    flag         -- quantize (True) or not (False)
    """
    # ------------------------------------------------------#
    # h5 -> darknet, saved as .weights
    # ------------------------------------------------------#
    keras_loader = KerasParser(cfg_path, h5_path, darknet_path)
    for block in keras_loader.block_gen:
        if 'convolutional' in block['type']:
            keras_loader.conv(block)
    keras_loader.close()

    # ------------------------------------------------------#
    # darknet -> rknn, saved as .rknn
    # ------------------------------------------------------#
    # Create RKNN object
    rknn = RKNN()
    # Load darknet model
    print('--> Loading model')
    rknn.load_darknet(model=cfg_path, weight=darknet_path)
    print('done')
    # NOTE(review): config() is called after load here; RKNN examples
    # usually configure before loading — confirm this ordering works.
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1)
    # Build model
    print('--> Building model')
    if flag:
        rknn.build(do_quantization=True, dataset=dataset_path, pre_compile=True)
    else:
        rknn.build(do_quantization=False, pre_compile=True)
    print('done')
    # Export model
    print('--> Export model')
    rknn.export_rknn(rknn_path)
    print('done')
    # BUGFIX: the RKNN context was never released.
    rknn.release()
def transfer(pb_path, rknn_name):
    """Convert a frozen TensorFlow graph (mini_XCEPTION) to an RKNN model.

    pb_path   -- frozen .pb graph ('input_1' -> 'predictions/Softmax')
    rknn_name -- destination .rknn path
    Relies on module-level INPUT_WIDTH / INPUT_HEIGHT for the input size.
    Exits the process with the failing return code on any error.
    """
    # Create the RKNN conversion object.
    #rknn = RKNN(verbose=True, verbose_file='./mini_XCEPTION_build.log')
    rknn = RKNN()

    # Input pre-processing config for the NPU. channel_mean_value and
    # reorder_channel are left at their defaults (no mean/scale, channel
    # order unchanged); only the quantization dtype is set.
    rknn.config(quantized_dtype='dynamic_fixed_point-8')

    # Load the TensorFlow model: tf_pb is the frozen graph, inputs/outputs
    # name the graph's entry and exit nodes, input_size_list gives the
    # single grayscale input's size.
    print('--> Loading model')
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['input_1'],
                               outputs=['predictions/Softmax'],
                               input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if ret:
        print('Load Model failed!')
        exit(ret)
    print('done')

    # Build without quantization. Quantizing would shrink the model and
    # speed up inference at some accuracy cost.
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret:
        print('Build Model failed!')
        exit(ret)
    print('done')

    # Export the .rknn model file.
    print('--> Export RKNN model')
    ret = rknn.export_rknn(rknn_name)
    if ret:
        print('Export Model failed!')
        exit(ret)
    print('done')

    # Release RKNN Context
    rknn.release()
def caffe2rknn(caffe_proto, caffe_weight, rknn_model):
    """Convert a Caffe model (prototxt + caffemodel) to an RKNN model.

    caffe_proto  -- .prototxt network definition
    caffe_weight -- .caffemodel weights blob
    rknn_model   -- destination .rknn path
    """
    print("start export")
    rknn = RKNN(verbose=True)
    # (x - 127.5) / 128 per channel; '2 1 0' swaps RGB <-> BGR.
    rknn.config(channel_mean_value='127.5 127.5 127.5 128.0',
                reorder_channel='2 1 0',
                #reorder_channel='0 1 2',
                #need_horizontal_merge=True
                )
    # BUGFIX: every return code was previously assigned and ignored, so the
    # function printed "export finished" even after a failed step.
    ret = rknn.load_caffe(model=caffe_proto, proto="caffe", blobs=caffe_weight)
    if ret != 0:
        print("load_caffe failed")
        rknn.release()
        return
    ret = rknn.build(do_quantization=False)
    #ret = rknn.build(do_quantization=True)
    if ret != 0:
        print("build failed")
        rknn.release()
        return
    ret = rknn.export_rknn(export_path=rknn_model)
    if ret != 0:
        print("export_rknn failed")
    # BUGFIX: the RKNN context was never released.
    rknn.release()
    print("export finished")
def save_rknn(self, rknnpath, verbose=True, verbose_file=None,
              input_mean_value='0 0 0 1', input_channels='0 1 2',
              do_quantization=True, pre_compile=True):
    """Export this model as an RKNN file.

    The graph is first frozen to a temporary './tmp.pb', then converted
    with the RKNN toolkit; quantization calibration reads
    './rknn_quantization.txt'. Exits the process on any conversion error.
    """
    tmp_pb_path = './tmp.pb'
    from rknn.api import RKNN

    # Freeze the current graph to the temporary pb file first.
    self.save_pb(tmp_pb_path)

    converter = RKNN(verbose=verbose, verbose_file=verbose_file)

    print('--> config model')
    converter.config(channel_mean_value=input_mean_value,
                     reorder_channel=input_channels)
    print('done')

    print('--> Loading pb, input shape = ' + str([self.__input_shape]))
    ret = converter.load_tensorflow(tf_pb=tmp_pb_path,
                                    inputs=[self.input.op.name],
                                    outputs=[self.output.op.name],
                                    input_size_list=[list(self.__input_shape)])
    if ret != 0:
        print('Load pb failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    print('--> Building model')
    ret = converter.build(do_quantization=do_quantization,
                          dataset='./rknn_quantization.txt',
                          pre_compile=pre_compile)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    print('--> Export RKNN model')
    ret = converter.export_rknn(rknnpath)
    if ret != 0:
        print('Export rknn failed!')
        exit(ret)
    print('done')

    converter.release()
def rknn_convert(input_model, output_model, model_input_shape,
                 output_tensor_num, dataset_file, target_platform):
    """Convert a frozen TF detection graph to a quantized RKNN model.

    input_model       -- frozen .pb graph with input node 'image_input'
    output_model      -- destination .rknn path
    model_input_shape -- (height, width) tuple; depth 3 is appended
    output_tensor_num -- number of prediction heads (1, 2 or 3)
    dataset_file      -- calibration dataset list for quantization
    target_platform   -- RKNN target platform string
    Raises ValueError on an unsupported output_tensor_num; exits the
    process with the failing return code on any RKNN error.
    """
    converter = RKNN()

    print('--> config model')
    # (x - 0) / 255 pre-processing, RGB channel order unchanged.
    converter.config(channel_mean_value='0 0 0 255',
                     reorder_channel='0 1 2',
                     batch_size=1,
                     target_platform=target_platform)

    print('--> Loading model')
    # Single-head graphs use an unsuffixed node name; multi-head graphs
    # number their heads starting at 1.
    if output_tensor_num == 1:
        output_tensor_names = ['predict_conv/BiasAdd']
    elif output_tensor_num in (2, 3):
        output_tensor_names = ['predict_conv_%d/BiasAdd' % head
                               for head in range(1, output_tensor_num + 1)]
    else:
        raise ValueError('invalid output tensor number ', output_tensor_num)

    status = converter.load_tensorflow(tf_pb=input_model,
                                       inputs=['image_input'],
                                       outputs=output_tensor_names,
                                       input_size_list=[model_input_shape + (3,)],
                                       predef_file=None)
    if status != 0:
        print('Load failed!')
        exit(status)

    print('--> Building model')
    status = converter.build(do_quantization=True,
                             dataset=dataset_file,
                             pre_compile=True)
    if status != 0:
        print('Build failed!')
        exit(status)

    print('--> Export RKNN model')
    status = converter.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    converter.release()
def rknn_convert(input_model, output_model, model_input_shape,
                 dataset_file, target_platform):
    """Convert a frozen TF classifier graph to a quantized RKNN model.

    input_model       -- frozen .pb ('image_input' -> 'dense/Softmax')
    output_model      -- destination .rknn path
    model_input_shape -- (height, width) tuple; depth 3 is appended
    dataset_file      -- calibration dataset list for quantization
    target_platform   -- RKNN target platform string
    Exits the process with the failing return code on any error.
    """
    converter = RKNN()

    print('--> config model')
    # (x - 0) / 255 pre-processing, RGB channel order unchanged.
    converter.config(channel_mean_value='0 0 0 255',
                     reorder_channel='0 1 2',
                     batch_size=1,
                     target_platform=target_platform)

    print('--> Loading model')
    status = converter.load_tensorflow(tf_pb=input_model,
                                       inputs=['image_input'],
                                       outputs=['dense/Softmax'],
                                       input_size_list=[model_input_shape + (3, )],
                                       predef_file=None)
    if status != 0:
        print('Load failed!')
        exit(status)

    print('--> Building model')
    status = converter.build(do_quantization=True,
                             dataset=dataset_file,
                             pre_compile=True)
    if status != 0:
        print('Build failed!')
        exit(status)

    print('--> Export RKNN model')
    status = converter.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    converter.release()
# pre-process config print('--> config model') rknn.config(mean_values=[[127.5, 127.5, 127.5]], std_values=[[127.5, 127.5, 127.5]], reorder_channel='0 1 2') print('done') # Load keras model print('--> Loading model') ret = rknn.load_keras(model=KERAS_MODEL_PATH) if ret != 0: print('Load keras model failed!') exit(ret) print('done') # Build model print('--> Building model') ret = rknn.build(do_quantization=True, dataset='./dataset.txt') if ret != 0: print('Build pytorch failed!') exit(ret) print('done') # Export rknn model print('--> Export RKNN model') ret = rknn.export_rknn('./xception.rknn') if ret != 0: print('Export xception.rknn failed!') exit(ret) print('done') # ret = rknn.load_rknn('./xception.rknn')
# pre-process config print('--> config model') rknn.config(channel_mean_value='103.94 116.78 123.68 58.82', reorder_channel='0 1 2') print('done') # Load tensorflow model print('--> Loading model') ret = rknn.load_darknet(model=yolov3_model_cfg, weight=yolov3_weights) if ret != 0: raise Exception('Load darknet yolov3 failed!') print('done') # Build model print('--> Building model') build_timer = timer() # ret = rknn.build(do_quantization=True, dataset='./dataset.txt') # do_quantization:是否对模型进行量化,值为 True 或 False。 ret = rknn.build(do_quantization=False, pre_compile=pre_compile) if ret != 0: raise Exception('Build yolov3 failed!') print('done, time: %.2fs' % (timer() - build_timer)) # Export rknn model print('--> Export RKNN model') ret = rknn.export_rknn(rknn_model) if ret != 0: raise Exception('Export rknn model: %s failed!' % rknn_model) print('done: %s, time: %.2fs' % (rknn_model, timer() - total_timer))
def main():
    """Drive an RKNN convert/inference/eval-perf session from a YAML config.

    Reads the module-level `yaml_file`; the config selects the model type,
    loader parameters, build options and which stages (export, inference,
    eval_perf) to run. Exits the process with the failing return code on
    any RKNN error.
    """
    with open(yaml_file, 'r') as F:
        # BUGFIX: yaml.load() without a Loader is deprecated and unsafe on
        # untrusted input; safe_load covers plain config files.
        config = yaml.safe_load(F)
    model_type = config['running']['model_type']
    print('model_type is {}'.format(model_type))

    rknn = RKNN(verbose=True)

    print('--> config model')
    rknn.config(**config['config'])
    print('done')

    print('--> Loading model')
    # Dispatch to the loader matching the configured model type.
    load_function = getattr(rknn, _model_load_dict[model_type])
    ret = load_function(**config['parameters'][model_type])
    if ret != 0:
        print('Load mobilenet_v2 failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # An already-converted .rknn model needs no build step.
    if model_type != 'rknn':
        print('--> Building model')
        ret = rknn.build(**config['build'])
        if ret != 0:
            print('Build mobilenet_v2 failed!')
            exit(ret)
    else:
        print('--> skip Building model step, cause the model is already rknn')

    if config['running']['export'] is True:
        print('--> Export RKNN model')
        ret = rknn.export_rknn(**config['export_rknn'])
        if ret != 0:
            # BUGFIX: this branch previously printed the copy-pasted
            # 'Init runtime environment failed' message.
            print('Export RKNN model failed')
            exit(ret)
    else:
        print('--> skip Export model')

    if (config['running']['inference'] is True) or (config['running']['eval_perf'] is True):
        print('--> Init runtime environment')
        ret = rknn.init_runtime(**config['init_runtime'])
        if ret != 0:
            print('Init runtime environment failed')
            exit(ret)

        print('--> load img')
        img = cv2.imread(config['img']['path'])
        print('img shape is {}'.format(img.shape))
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        inputs = [img]

        if config['running']['inference'] is True:
            print('--> Running model')
            config['inference']['inputs'] = inputs
            outputs = rknn.inference(inputs)
            print('len of output {}'.format(len(outputs)))
            print('outputs[0] shape is {}'.format(outputs[0].shape))
            print(outputs[0][0][0:2])
        else:
            print('--> skip inference')

        if config['running']['eval_perf'] is True:
            print('--> Begin evaluate model performance')
            config['inference']['inputs'] = inputs
            perf_results = rknn.eval_perf(inputs=[img])
        else:
            print('--> skip eval_perf')
    else:
        print('--> skip inference')
        print('--> skip eval_perf')
print('--> Loading model') #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2') # Load TensorFlow Model print('--> Loading model') rknn.load_tensorflow(tf_pb='./freeze.pb', inputs=['Placeholder'], outputs=['fully_connected/Identity'], input_size_list=[[28, 6]]) print('done') # Build Model print('--> Building model') rknn.build(do_quantization=False) print('done') # Export RKNN Model rknn.export_rknn('./MTM_LSTM_RKNN.rknn') # Direct Load RKNN Model rknn.load_rknn('./MTM_LSTM_RKNN.rknn') stock_file_name = 'AAPL_1m.csv' encoding = 'euc-kr' names = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume'] # Read and Delete Axis raw_dataframe = pd.read_csv(stock_file_name, names=names, encoding=encoding) today = raw_dataframe.values[-1, 0]
if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()
    # Load darknet model (full YOLOv3; tiny variant kept for reference)
    print('--> Loading model')
    rknn.load_darknet(model='./yolov3.cfg', weight="./yolov3.weights")
    #rknn.load_darknet(model='./yolov3-tiny.cfg', weight="./yolov3-tiny.weights")
    print('done')
    # (x - 0) / 255 pre-processing, RGB channel order unchanged.
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1)
    # Build model, quantized against the 608x608 calibration set.
    print('--> Building model')
    #rknn.build(do_quantization=True, dataset='./dataset.txt')
    rknn.build(do_quantization=True, dataset='./dataset_608x608.txt')
    print('done')
    #rknn.export_rknn('./yolov3_tiny.rknn')
    rknn.export_rknn('./yolov3.rknn')
    #rknn.load_rknn('./yolov3.rknn')
    #image = Image.open('./dog.jpg').resize((416, 416))
    #rknn.eval_perf(inputs=[image], is_print=True)
    # BUGFIX: release the RKNN context before exiting.
    rknn.release()
    exit(0)
# NOTE(review): script fragment — the first line below is the tail of an
# rknn.config(...) call whose beginning is outside the visible chunk, and
# `rknn` itself is created earlier in the script.
            quantized_dtype='asymmetric_quantized-u8')
print('done')

# Load tflite model
print('--> Loading model')
ret = rknn.load_tflite(
    model='../../tflite/mobilenet_v1/mobilenet_v1.tflite')
if ret != 0:
    print('Load mobilenet_v1 failed!')
    exit(ret)
print('done')

# Build model (quantized; rknn_batch_size=4 batches four inputs per run)
print('--> Building model')
ret = rknn.build(do_quantization=True, dataset='./dataset.txt',
                 rknn_batch_size=4)
if ret != 0:
    print('Build mobilenet_v1 failed!')
    exit(ret)
print('done')

# Export rknn model
print('--> Export RKNN model')
ret = rknn.export_rknn('./mobilenet_v1.rknn')
if ret != 0:
    print('Export mobilenet_v1.rknn failed!')
    exit(ret)
print('done')

# Set inputs
# NOTE(review): script fragment — the first line below is the tail of an
# rknn.config(...) call whose beginning is outside the visible chunk, and
# `rknn` itself is created earlier in the script.
            quantized_dtype='dynamic_fixed_point-8')
print('done')

# Load caffe model (MTCNN PNet)
print('--> Loading model')
ret = rknn.load_caffe(model='./PNet.prototxt',
                      proto='caffe',
                      blobs='./PNet.caffemodel')  ##csq
if ret != 0:
    print('Load model failed!')
    exit(ret)
print('done')

# Build model, quantized against the 270x207 calibration set.
print('--> Building model')
ret = rknn.build(do_quantization=True, dataset='./dataset_pnet_270_207.txt')  ####csq csq
if ret != 0:
    print('Build model failed!')
    exit(ret)
print('done')

# Export rknn model
print('--> Export RKNN model')
ret = rknn.export_rknn('./PNet_270_207.rknn')  ##csq
if ret != 0:
    print('Export model failed!')
    exit(ret)
print('done')

# init runtime environment
#print('--> Init runtime environment')
# NOTE(review): script fragment — the lines up to the first ')' are the tail
# of an rknn.config(mean_values=..., ...) call inside an if-branch whose
# beginning (and `rknn`, `opt`, ONNX_MODEL, RKNN_MODEL) is outside the
# visible chunk.
        ]],
        std_values=[[
            255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0,
            255.0, 255.0, 255.0
        ]],
        batch_size=opt.batch_size,
        target_platform="rk3399pro")
    # reorder_channel='0 1 2',
else:
    # Fallback config: (x - 0) / 255 with RGB <-> BGR channel swap.
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='2 1 0',
                batch_size=opt.batch_size,
                target_platform="rk3399pro")

# Load onnx model
print('--> Loading model')
ret = rknn.load_onnx(model=ONNX_MODEL)
assert ret == 0, "Load onnx failed!"

# Build model (quantized; pre_compile optionally speeds up on-device init)
print('--> Building model')
if opt.precompile:
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt',
                     pre_compile=True)  # pre_compile=True
else:
    ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
assert ret == 0, "Build onnx failed!"

# Export rknn model
print('--> Export RKNN model')
ret = rknn.export_rknn(RKNN_MODEL)
assert ret == 0, "Export %s.rknn failed!" % opt.rknn
print('done')
import math
import random

from rknn.api import RKNN

if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()
    # Load darknet model (YOLOv3 at 608x608 input resolution)
    print('--> Loading model')
    rknn.load_darknet(model='./yolov3_608x608.cfg', weight="./yolov3.weights")
    print('done')
    # (x - 0) / 255 pre-processing, RGB channel order unchanged.
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1)
    # Build model, quantized and pre-compiled for faster on-device loading.
    print('--> Building model')
    rknn.build(do_quantization=True,
               dataset='./dataset_608x608.txt',
               pre_compile=True)
    print('done')
    rknn.export_rknn('./yolov3_608x608.rknn')
    # BUGFIX: release the RKNN context before exiting.
    rknn.release()
    exit(0)
def convert_model(model_path, out_path, pre_compile):
    """Convert every model listed in a model_config.yml into .rknn files.

    model_path  -- either a yaml config file path or a directory containing
                   'model_config.yml' (model files are resolved relative to
                   the directory)
    out_path    -- directory the .rknn files are written to
    pre_compile -- forwarded to rknn.build()
    Returns the list of exported .rknn file paths; exits the process when
    the config file cannot be found.
    """
    if os.path.isfile(model_path):
        yaml_config_file = model_path
        model_path = os.path.dirname(yaml_config_file)
    else:
        yaml_config_file = os.path.join(model_path, 'model_config.yml')
    if not os.path.exists(yaml_config_file):
        # BUGFIX: the format string was 'model config % not exist!' — '%'
        # with no conversion character raises ValueError at runtime.
        print('model config %s not exist!' % yaml_config_file)
        exit(-1)
    model_configs = parse_model_config(yaml_config_file)
    exported_rknn_model_path_list = []
    for model_name in model_configs['models']:
        model = model_configs['models'][model_name]
        rknn = RKNN()
        rknn.config(**model['configs'])
        print('--> Loading model...')
        if model['platform'] == 'tensorflow':
            model_file_path = os.path.join(model_path, model['model_file_path'])
            # Parse 'h,w,c' strings into per-input [h, w, c] int lists.
            input_size_list = []
            for input_size_str in model['subgraphs']['input-size-list']:
                input_size_list.append(list(map(int, input_size_str.split(','))))
            rknn.load_tensorflow(tf_pb=model_file_path,
                                 inputs=model['subgraphs']['inputs'],
                                 outputs=model['subgraphs']['outputs'],
                                 input_size_list=input_size_list)
        elif model['platform'] == 'tflite':
            model_file_path = os.path.join(model_path, model['model_file_path'])
            rknn.load_tflite(model=model_file_path)
        elif model['platform'] == 'caffe':
            prototxt_file_path = os.path.join(model_path, model['prototxt_file_path'])
            caffemodel_file_path = os.path.join(model_path, model['caffemodel_file_path'])
            rknn.load_caffe(model=prototxt_file_path,
                            proto='caffe',
                            blobs=caffemodel_file_path)
        elif model['platform'] == 'onnx':
            model_file_path = os.path.join(model_path, model['model_file_path'])
            rknn.load_onnx(model=model_file_path)
        else:
            print("platform %s not support!" % (model['platform']))
            # BUGFIX: previously fell through and tried to build/export a
            # model that was never loaded.
            rknn.release()
            continue
        print('done')
        if model['quantize']:
            dataset_path = os.path.join(model_path, model['dataset'])
        else:
            dataset_path = './dataset'
        print('--> Build RKNN model...')
        rknn.build(do_quantization=model['quantize'],
                   dataset=dataset_path,
                   pre_compile=pre_compile)
        print('done')
        export_rknn_model_path = "%s.rknn" % (os.path.join(out_path, model_name))
        print('--> Export RKNN model to: {}'.format(export_rknn_model_path))
        rknn.export_rknn(export_path=export_rknn_model_path)
        exported_rknn_model_path_list.append(export_rknn_model_path)
        # BUGFIX: release each per-model RKNN context.
        rknn.release()
        print('done')
    return exported_rknn_model_path_list
# NOTE(review): script fragment — `rknn` and INPUT_SIZE are defined earlier
# in this script, outside the visible chunk.
print('--> Init runtime environment')
# NOTE(review): init_runtime() is called before config/load/build here; the
# RKNN toolkit normally expects a loaded and built model before runtime
# init — confirm this ordering actually works on the target.
ret = rknn.init_runtime(host='rk3399pro')
if ret != 0:
    print('Init runtime environment failed')
    exit(ret)
print('done')
# Config for Model Input PreProcess: (x - 0) / 255, RGB order unchanged.
rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
#rknn.config(channel_mean_value='0 0 0 255', reorder_channel='2 1 0')
# Load TensorFlow Model
print('--> Loading model')
rknn.load_tensorflow(
    tf_pb='../digital_gesture_recognition/model_2500/digital_gesture.pb',
    inputs=['input_x'],
    outputs=['probability'],
    input_size_list=[[INPUT_SIZE, INPUT_SIZE, 3]])
print('done')
# Build Model
print('--> Building model')
rknn.build(do_quantization=False, dataset='./dataset.txt')
print('done')
# Export RKNN Model
rknn.export_rknn('./digital_gesture.rknn')
# Release RKNN Context
rknn.release()
def main(folder="test"):
    """Run the super-resolution RKNN model over every image in `folder`.

    Each image is normalized to [-1, 1], resized to PRESET x PRESET, run
    through the model, and the 4x-upscaled result is written to OUT_DIR as
    '<index>_yval.png'. Relies on module-level PRESET and OUT_DIR.
    """
    files = os.listdir(folder)

    # BUGFIX/perf: the model was previously loaded, built, exported and
    # runtime-initialized once per image inside the loop; it is invariant,
    # so do the whole conversion once up front.
    rknn = RKNN()
    print('--> Loading model')
    #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    rknn.load_tensorflow(tf_pb='pretrained/SR_freeze.pb',
                         inputs=['ESRGAN_g/Conv2D'],
                         outputs=['output_image'],
                         input_size_list=[[PRESET, PRESET, 3]])
    print('done')
    print('--> Building model')
    rknn.build(do_quantization=False)
    print('done')
    # Export, then reload the exported model directly.
    rknn.export_rknn('./sr_rknn.rknn')
    rknn.load_rknn('./sr_rknn.rknn')
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')

    for i, file_name in enumerate(files):
        img = cv2.imread("{}/{}".format(folder, file_name))
        # Normalize pixel values to [-1, 1].
        img = (img - 127.5) / 127.5
        h, w = img.shape[:2]
        print("w, h = ", w, h)
        # `inp` (not `input`, which shadows the builtin): model-sized tensor.
        inp = cv2.resize(img, (PRESET, PRESET), interpolation=cv2.INTER_CUBIC)
        inp = inp.reshape(PRESET, PRESET, 3)
        inp = np.array(inp, dtype=np.float32)

        # Inference
        print('--> Running model')
        output_image = rknn.inference(inputs=[inp])
        print('complete')

        out = np.array(output_image, dtype=np.float64)
        print("output_image = ", out.shape)
        out = np.squeeze(out)
        # Model upscales by 4x; resize back to 4x the original size.
        Y_ = out.reshape(PRESET * 4, PRESET * 4, 3)
        Y_ = cv2.resize(Y_, (w * 4, h * 4), interpolation=cv2.INTER_CUBIC)
        print("output shape is ", Y_.shape)
        # Post-processing: map [-1, 1] back to [0, 255].
        Y_ = (Y_ + 1) * 127.5
        cv2.imwrite("{}/{}_yval.png".format(OUT_DIR, i), Y_)

    # Evaluate Perf on Simulator
    #rknn.eval_perf()
    # Release RKNN Context
    rknn.release()
# NOTE(review): script fragment — `rknn` and `np` come from earlier in this
# script, outside the visible chunk.
print('--> Loading model')
rknn.load_rknn('./RNN_RKNN.rknn')
#rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
# Load TensorFlow Model
print('--> Loading model')
rknn.load_tensorflow(tf_pb='/home/tkdidls/RNN/freeze.pb',
                     inputs=['Placeholder'],
                     outputs=['fully_connected/Identity'],
                     input_size_list=[[28, 6]])
print('done')
# Build Model
# NOTE(review): a dataset is passed but do_quantization=False, so the
# dataset argument appears unused here — confirm intent.
print('--> Building model')
rknn.build(do_quantization=False, dataset='./AAPL_5Y_sq.csv')
print('done')
# Export RKNN Model
rknn.export_rknn('./LSTM_RKNN.rknn')
# Direct Load RKNN Model
rknn.load_rknn('./LSTM_RKNN.rknn')
stock_file_name = 'AAPL_5Y.csv'
# Load the CSV as float32, then drop the header row and the date column.
storage = np.genfromtxt(stock_file_name, delimiter=',', dtype=np.float32)
storage = np.delete(storage, (0), axis=0)
storage = np.delete(storage, (0), axis=1)
print(storage)