def init_rknn(rknn_path):
    """Load a prebuilt .rknn model and initialize the RK1808 runtime.

    Args:
        rknn_path: path to a .rknn model file.

    Returns:
        An initialized RKNN object ready for inference.

    Exits the process with the RKNN error code on failure.
    """
    rknn = RKNN(verbose=True)
    ret = rknn.load_rknn(rknn_path)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('starting init runtime ...')
    # Fix: the original discarded init_runtime's return code, so a failed
    # runtime init went unnoticed until the first inference call.
    ret = rknn.init_runtime(target='rk1808', perf_debug=True)
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    return rknn
def load_model():
    """Load ./mobilenet_v2.rknn and initialize the RK3399Pro runtime.

    Returns:
        An initialized RKNN object.

    Exits the process with the RKNN error code on failure.
    """
    rknn = RKNN()
    print('-->loading model')
    # Fix: the original ignored load_rknn's return value; a missing or
    # corrupt model file would only surface later at init/inference time.
    ret = rknn.load_rknn('./mobilenet_v2.rknn')
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('-->Init runtime environment')
    ret = rknn.init_runtime(target='rk3399pro')
    if ret != 0:
        # Fix: corrected typo "enviroment" in the original message.
        print('Init runtime environment failed')
        exit(ret)
    return rknn
def rknn_convert(input_model, output_model, dataset_file, target_platform):
    """Convert an ONNX model to a quantized, pre-compiled RKNN model.

    Args:
        input_model: path to the source ONNX model.
        output_model: destination path for the .rknn file.
        dataset_file: dataset list file used for quantization calibration.
        target_platform: RKNN target platform string.

    Exits the process with the RKNN error code on any failed step.
    """
    converter = RKNN()

    # Pre-processing: mean/scale 127.5 on each channel, RGB order kept.
    print('--> config model')
    converter.config(channel_mean_value='127.5 127.5 127.5 127.5',
                     reorder_channel='0 1 2',
                     batch_size=1,
                     target_platform=target_platform)

    print('--> Loading model')
    status = converter.load_onnx(model=input_model)
    if status != 0:
        print('Load failed!')
        exit(status)

    print('--> Building model')
    status = converter.build(do_quantization=True,
                             dataset=dataset_file,
                             pre_compile=True)
    if status != 0:
        print('Build failed!')
        exit(status)

    print('--> Export RKNN model')
    status = converter.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    converter.release()
def quanlization_darknet_model(model_path, weight_path, dataset_txt, is_quantization=True, pre_compile=False):
    """Convert a Darknet model to RKNN, optionally quantizing it.

    The exported file is written next to `model_dir` (a module-level global
    not visible here — TODO confirm it is defined before this is called),
    named after the input model with a `_quan` suffix when quantized.

    Note: failures are only printed; the function continues past a failed
    load/build/export rather than aborting.
    """
    # Create the RKNN object with verbose logging to a file.
    rknn = RKNN(verbose=True, verbose_file='verbose.log')
    # mean 0, scale 255 per channel: (R-0)/255, (G-0)/255, (B-0)/255; channel order unchanged.
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=4)
    flag = rknn.load_darknet(model=model_path, weight=weight_path)
    # Nothing below needs modification for different models.
    if flag == 0:
        print('load_darknet success')
    else:
        print('load_darknet failure')
    print('done')
    # Build the parsed model.
    # do_quantization=False skips quantization.
    # Quantization shrinks the model and speeds up inference at some
    # cost in accuracy.
    print('--> Building model')
    print(os.getcwd())
    # flag = rknn.build(do_quantization=False)
    if is_quantization == True:
        flag = rknn.build(do_quantization=True, dataset=dataset_txt, pre_compile=pre_compile)
        print('do quantization ')
        # Export path for the quantized model ("<name>_quan.rknn").
        save_rknn_path = model_dir + '/' + model_path.split('/')[-1].split(
            '.')[0] + '_quan.rknn'
    else:
        flag = rknn.build(do_quantization=False)
        print('not do quantization')
        # Export path for the float model ("<name>.rknn").
        save_rknn_path = model_dir + '/' + model_path.split('/')[-1].split(
            '.')[0] + '.rknn'
    if flag == 0:
        print('build success')
    else:
        print('build failure')
    print('done')
    flag = rknn.export_rknn(save_rknn_path)
    if flag == 0:
        print('export success')
    else:
        print('export failure')
    print('done')
    print(save_rknn_path)
    # Release RKNN Context
    rknn.release()
    print('save_rknn_path:', save_rknn_path)
def load_model():
    """Load the pre-compiled pose-estimation RKNN model and init the runtime.

    Returns:
        An initialized RKNN object.

    Exits the process with the RKNN error code if runtime init fails.
    """
    net = RKNN()
    print('-->loading model')
    net.load_rknn('./pose_deploy_linevec_pre_compile.rknn')
    print('loading model done')

    print('--> Init runtime environment')
    status = net.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('!!!!!!!!!!!!!!!!!_____________________done___________________!!!!!!!!!!!!!!!!!!!!')
    return net
class RecModel():
    """Face-recognition wrapper around an RKNN embedding model.

    Extracts 112x112 face embeddings on an RK3399Pro NPU and matches them
    against a precomputed facebank by squared L2 distance.
    """

    def __init__(self, args):
        # args is expected to carry model_path, image_size and threshold
        # attributes (argparse namespace or similar) — TODO confirm.
        self.model_path = args.model_path
        self.input_size = args.image_size
        self.threshold = args.threshold
        self.rknn = RKNN()
        self.load_model()

    def load_model(self):
        """Load the .rknn model and init the runtime on a fixed device.

        Exits the process with the RKNN error code on failure.
        """
        ret = self.rknn.load_rknn(self.model_path)
        if ret != 0:
            print('load rknn model failed')
            exit(ret)
        print('load model success')
        # NOTE(review): device_id is hard-coded to one specific compute
        # stick; confirm this is intended for deployment.
        ret = self.rknn.init_runtime(target="rk3399pro", device_id="TD033101190400338")
        if ret != 0:
            print('Init runtime environment failed')
            exit(ret)
        print('init runtime success')
        version = self.rknn.get_sdk_version()
        print(version)
        # Inference
        print('--> Running model')

    def extract_features(self, img):
        """Return the L2-normalized embedding of a BGR face crop.

        Resizes to 112x112 (AREA when shrinking, CUBIC when enlarging)
        and converts BGR->RGB before inference.
        """
        if img.shape[0] > 112:
            img = cv2.resize(img, (112, 112), interpolation=cv2.INTER_AREA)
        if img.shape[0] < 112:
            img = cv2.resize(img, (112, 112), interpolation=cv2.INTER_CUBIC)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        outputs = self.rknn.inference(inputs=[img])[0]
        embedding = preprocessing.normalize(outputs).flatten()
        return embedding

    def load_facebank(self):
        """Load precomputed embeddings and names from .npy files."""
        self.features = np.load('npy/facebank_mtcnn_rknn_128.npy')
        self.names = np.load('npy/names_mtcnn_rknn_128.npy')

    def compare_feature(self, img):
        """Match the face in *img* against the facebank.

        Returns (names, distances); a sentinel ('None', 0) pair when no
        entry is within self.threshold.

        NOTE(review): `np.expand_dims(feature, 2)` on a 1-D embedding and
        the scalar-style `if min_idx == -1` test on an ndarray only work
        for specific shapes (single query) — verify against the numpy
        version in use before touching this logic.
        """
        feature = self.extract_features(img)
        diff = np.expand_dims(feature,2) - np.expand_dims(np.transpose(self.features,[1,0]), 0)
        dist = np.sum(np.power(diff, 2),axis=1)
        minimum = np.min(dist, axis=1)
        min_idx = np.argmin(dist,axis=1)
        min_idx[minimum > self.threshold] = -1  # if no match, set idx to -1
        if min_idx == -1:
            return (np.array([['None']]),np.array([0]))
        else:
            return self.names[min_idx], minimum
def init_rknn(rknn_path):
    """Load an .rknn model from *rknn_path* and init the RK1808 runtime.

    Returns the ready RKNN object; exits with the error code on failure.
    """
    net = RKNN()

    status = net.load_rknn(rknn_path)
    if status != 0:
        print('Load RKNN model failed')
        exit(status)

    print('starting init runtime ...')
    # Alternative: pin a specific stick via device_id, e.g.
    # net.init_runtime(target='rk1808', device_id='TS018080000000053')
    status = net.init_runtime(target='rk1808')
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return net
def load_model():
    """Load the YOLOv3-tiny RKNN model and initialize the runtime.

    Returns the initialized RKNN object; exits with the error code if
    runtime init fails.
    """
    detector = RKNN()
    print('-->loading model')
    detector.load_rknn('./yolov3_tiny.rknn')
    # Swap in the full model if needed:
    # detector.load_rknn('./yolov3.rknn')
    print('loading model done')

    print('--> Init runtime environment')
    status = detector.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return detector
def to_rknn(pb_path, rknn_path):
    """Convert a TensorFlow .pb model to a pre-compiled float RKNN model.

    Args:
        pb_path: path to the frozen TensorFlow graph.
        rknn_path: destination path for the .rknn file.

    Exits the process with the RKNN error code on any failed step.
    """
    rknn = RKNN(verbose=True)
    # mean 0, scale 1: input passed through unchanged; RGB order kept.
    rknn.config(channel_mean_value='0 0 0 1', reorder_channel='0 1 2')
    # rknn.config(channel_mean_value='128 128 128 128', reorder_channel='0 1 2')

    print('--> Loading model')
    # Fix: the original discarded every return code, so a failed load,
    # build or export silently produced no (or a stale) output file.
    ret = rknn.load_tensorflow(tf_pb=pb_path,
                               inputs=['Placeholder'],
                               outputs=['ConvPred/ConvPred'],
                               input_size_list=[[INPUT_HEIGHT, INPUT_WIDTH, 3]])
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    print('--> Building model')
    ret = rknn.build(do_quantization=False, pre_compile=True)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    ret = rknn.export_rknn(rknn_path)
    if ret != 0:
        print('Export RKNN model failed!')
        exit(ret)
def load_rknn_model(PATH):
    """Load an .rknn model from *PATH* and initialize the runtime.

    Returns the initialized RKNN object; exits with the RKNN error code
    on failure.
    """
    net = RKNN()

    print('--> Loading model')
    status = net.load_rknn(PATH)
    if status != 0:
        print('load rknn model failed')
        exit(status)
    print('done')

    status = net.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')

    return net
def init_pnet():
    """Load and initialize the nine pyramid-scale PNet RKNN models.

    Model filenames are derived from the (width, height) pairs in the
    module-level PNET_PYRAMID table.

    Returns:
        A list of nine initialized RKNN objects, in pyramid order.

    Exits the process with the RKNN error code if any runtime init fails.
    """
    # Fix: removed the dead manual counter (i = 0 / i += 1 — the for-loop
    # rebinds i every iteration anyway) and stopped shadowing the builtin
    # `list` with a local variable.
    pnets = []
    for i in range(9):
        rknn_name = "PNet_%d_%d.rknn" % (PNET_PYRAMID[i][0], PNET_PYRAMID[i][1])
        pnet_rknn = RKNN()  # verbose=True, verbose_file='./mobilenet_build.log'
        pnet_rknn.load_rknn(rknn_name)
        ret = pnet_rknn.init_runtime()
        if ret != 0:
            print('Init pnet runtime environment failed')
            exit(ret)
        pnets.append(pnet_rknn)
    return pnets
def load_model():
    """Load the module-level `rknn_model` file, timing each stage.

    Uses the module-level `timer` for coarse profiling of load and init.

    Returns:
        An initialized RKNN object targeting RK1808.

    Raises:
        Exception: if runtime initialization fails.
    """
    net = RKNN()

    print('-->loading model')
    t_start = timer()
    net.load_rknn(rknn_model)
    print('loading model done: ', timer() - t_start)

    print('--> Init runtime environment')
    t_start = timer()
    if net.init_runtime(target='rk1808') != 0:
        raise Exception('Init runtime environment failed')
    print('init done: ', timer() - t_start)

    return net
def load_model(model_name):
    """Load the named .rknn model and initialize the runtime.

    Args:
        model_name: path to a .rknn model file.

    Returns:
        An initialized RKNN object; exits with the RKNN error code if
        runtime init fails.
    """
    # Create the RKNN object.
    net = RKNN()

    # Load the prebuilt RKNN model from disk.
    print('-->loading model')
    net.load_rknn(model_name)
    print('loading model done')

    # Bring up the RKNN runtime environment.
    print('--> Init runtime environment')
    status = net.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')

    return net
def load_rknn_model(PATH):
    """Load an .rknn model and init the runtime on one specific device.

    The runtime is pinned to device TS018083200400178 and asked to
    re-export a pre-compiled model (rknn2precompile=True).

    Returns the initialized RKNN object; exits with the RKNN error code
    on failure.
    """
    net = RKNN()

    print('--> Loading model')
    status = net.load_rknn(PATH)
    if status != 0:
        print('load rknn model failed')
        exit(status)
    print('done')

    status = net.init_runtime(device_id='TS018083200400178',
                              rknn2precompile=True)
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')

    return net
def load_model():
    """Load the digital-gesture RKNN model and initialize the runtime.

    Returns the initialized RKNN object; exits with the RKNN error code
    on failure.
    """
    net = RKNN()

    print('-->loading model')
    net.load_rknn('./digital_gesture.rknn')
    print('loading model done')

    print('--> Init runtime environment')
    # NOTE(review): `host=` is unusual here — the common RKNN-Toolkit
    # keyword is `target=`; confirm against the toolkit version in use.
    status = net.init_runtime(host='rk3399pro')
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')

    return net
def to_rknn(pb_path, rknn_path):
    """Convert the LaneNet TensorFlow .pb model to a float RKNN model.

    Exports two output tensors: the binary-segmentation logits and the
    instance-segmentation pixel embedding.
    """
    converter = RKNN(verbose=True)

    # Normalize inputs to roughly [-1, 1] and swap to BGR channel order.
    converter.config(channel_mean_value='127.5 127.5 127.5 127.5',
                     reorder_channel='2 1 0')

    output_nodes = [
        #'lanenet_model/vgg_backend/binary_seg/ArgMax',
        # Workaround RKNN 1.1.0 bug
        'lanenet_model/vgg_frontend/vgg16_decode_module/binary_seg_decode/binary_final_logits/binary_final_logits',
        #'lanenet_model/vgg_backend/binary_seg/Softmax',
        'lanenet_model/vgg_backend/instance_seg/pix_embedding_conv/pix_embedding_conv',
    ]
    converter.load_tensorflow(tf_pb=pb_path,
                              inputs=['input_tensor'],
                              outputs=output_nodes,
                              input_size_list=[[256, 512, 3]])

    # Float build (no quantization); the dataset file is still passed.
    converter.build(do_quantization=False, dataset='./dataset.txt')
    converter.export_rknn(rknn_path)
def convert_to_rknn():
    """Convert lprnet.onnx to a pre-compiled float RKNN model.

    Writes ./lprnet.rknn; exits with the RKNN error code on any
    failed step.
    """
    from rknn.api import RKNN

    converter = RKNN(verbose=True)

    # Pre-processing configuration.
    print('--> config model')
    converter.config(channel_mean_value='127.5 127.5 127.5 128',
                     reorder_channel='0 1 2')
    print('done')

    print('--> Loading model')
    status = converter.load_onnx(model='lprnet.onnx')
    if status != 0:
        print('Load model failed!')
        exit(status)
    print('done')

    print('--> Building model')
    status = converter.build(do_quantization=False,
                             pre_compile=True,
                             dataset='./data/dataset.txt')
    if status != 0:
        print('Build model failed!')
        exit(status)
    print('done')

    print('--> Export RKNN model')
    status = converter.export_rknn('./lprnet.rknn')
    if status != 0:
        print('Export model failed!')
        exit(status)
    print('done')
def test_rknn():
    """Run lprnet.rknn on a sample plate image and print the decoded label.

    Loads the model, initializes the RK1808 runtime, runs inference on
    data/eval/000256.png and decodes the prediction via the module-level
    `decode` helper and `CHARS` alphabet.

    Exits the process with the RKNN error code on failure.
    """
    from rknn.api import RKNN
    rknn = RKNN()

    # Load rknn model
    print('--> Load RKNN model')
    ret = rknn.load_rknn('lprnet.rknn')
    if ret != 0:
        # Fix: the original printed 'Export model failed!' here — a
        # copy-paste from the export step that mislabels a load failure.
        print('Load RKNN model failed!')
        exit(ret)

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime(target='rk1808')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')
    image = cv2.imread('data/eval/000256.png')
    outputs = rknn.inference(inputs=[image])
    preds = outputs[0]
    labels, pred_labels = decode(preds, CHARS)
    print(labels)
    print('done')
    rknn.release()
def load_model(modle_path):
    """Load an .rknn model from *modle_path* and init the runtime.

    (Parameter name kept as-is for keyword-call compatibility.)

    Returns the initialized RKNN object; exits with the RKNN error code
    if runtime init fails.
    """
    net = RKNN()
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    print('-->loading model')
    net.load_rknn(modle_path)
    print('loading model done')

    print('--> Init runtime environment')
    status = net.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    return net
def transfer(pb_path, rknn_name):
    """Convert a TensorFlow .pb model to a float RKNN model.

    Args:
        pb_path: path to the frozen TensorFlow graph.
        rknn_name: destination path for the exported .rknn file.

    Input size comes from the module-level INPUT_WIDTH / INPUT_HEIGHT
    (single-channel input). Exits with the RKNN error code on failure.
    """
    # Create the RKNN object.
    # rknn = RKNN(verbose=True, verbose_file='./mini_XCEPTION_build.log')
    converter = RKNN()

    # Configure input pre-processing for the NPU.
    # channel_mean_value='0 0 0 255' would mean (R-0)/255, (G-0)/255,
    # (B-0)/255 applied automatically at inference time.
    # reorder_channel='0 1 2' keeps the channel order; '2 1 0' would swap
    # channels 0 and 2 (RGB <-> BGR).
    # rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    converter.config(quantized_dtype='dynamic_fixed_point-8')

    # Load the TensorFlow model:
    #   tf_pb          — the frozen graph to convert
    #   inputs/outputs — graph node names
    #   input_size_list — model input dimensions
    print('--> Loading model')
    status = converter.load_tensorflow(
        tf_pb=pb_path,
        inputs=['input_1'],
        outputs=['predictions/Softmax'],
        input_size_list=[[INPUT_WIDTH, INPUT_HEIGHT, 1]])
    if status != 0:
        print('Load Model failed!')
        exit(status)
    print('done')

    # Build without quantization: quantization shrinks the model and
    # speeds it up, at some cost in accuracy.
    print('--> Building model')
    status = converter.build(do_quantization=False)
    if status != 0:
        print('Build Model failed!')
        exit(status)
    print('done')

    # Export the .rknn model file.
    print('--> Export RKNN model')
    status = converter.export_rknn(rknn_name)
    if status != 0:
        print('Export Model failed!')
        exit(status)
    print('done')

    # Release the RKNN context.
    converter.release()
def save_rknn(self, rknnpath, verbose=True, verbose_file=None, input_mean_value='0 0 0 1', input_channels='0 1 2', do_quantization=True, pre_compile=True):
    """Export this model to an RKNN file via a temporary frozen .pb.

    Saves the graph to ./tmp.pb, converts it with RKNN-Toolkit using the
    instance's input/output op names and input shape, quantizing against
    ./rknn_quantization.txt when requested.

    Exits the process with the RKNN error code on any failed step.
    """
    TMP_PB_PATH = './tmp.pb'
    from rknn.api import RKNN

    # Freeze the current graph to an intermediate .pb file.
    self.save_pb(TMP_PB_PATH)

    converter = RKNN(verbose=verbose, verbose_file=verbose_file)

    print('--> config model')
    converter.config(channel_mean_value=input_mean_value,
                     reorder_channel=input_channels)
    print('done')

    print('--> Loading pb, input shape = ' + str([self.__input_shape]))
    status = converter.load_tensorflow(
        tf_pb=TMP_PB_PATH,
        inputs=[self.input.op.name],
        outputs=[self.output.op.name],
        input_size_list=[list(self.__input_shape)])
    if status != 0:
        print('Load pb failed! Ret = {}'.format(status))
        exit(status)
    print('done')

    print('--> Building model')
    status = converter.build(do_quantization=do_quantization,
                             dataset='./rknn_quantization.txt',
                             pre_compile=pre_compile)
    if status != 0:
        print('Build model failed!')
        exit(status)
    print('done')

    print('--> Export RKNN model')
    status = converter.export_rknn(rknnpath)
    if status != 0:
        print('Export rknn failed!')
        exit(status)
    print('done')

    converter.release()
def init_pnet():
    """Load and initialize the two fixed-size PNet RKNN models.

    Loads PNet_406_207.rknn (odd pyramid level) and PNet_289_147.rknn
    (even pyramid level), in that order.

    Returns:
        A two-element list of initialized RKNN objects [odd, even].

    Exits the process with the RKNN error code if any runtime init fails.
    """
    # Fix: folded the two copy-pasted odd/even blocks into one loop and
    # stopped shadowing the builtin `list` with a local variable.
    pnets = []
    for width, height in ((406, 207), (289, 147)):
        rknn_name = "PNet_%d_%d.rknn" % (width, height)
        pnet_rknn = RKNN()  # verbose=True, verbose_file='./mobilenet_build.log'
        pnet_rknn.load_rknn(rknn_name)
        ret = pnet_rknn.init_runtime()
        if ret != 0:
            # print('Init pnet runtime environment failed')
            exit(ret)
        pnets.append(pnet_rknn)
    return pnets
def rknn_convert(input_model, output_model, model_input_shape, output_tensor_num, dataset_file, target_platform):
    """Convert a TensorFlow detection .pb to a quantized RKNN model.

    Args:
        input_model: path to the frozen TensorFlow graph.
        output_model: destination path for the .rknn file.
        model_input_shape: (height, width) tuple; 3 channels are appended.
        output_tensor_num: 1, 2 or 3 prediction heads to export.
        dataset_file: dataset list file for quantization calibration.
        target_platform: RKNN target platform string.

    Raises:
        ValueError: if output_tensor_num is not 1, 2 or 3.

    Exits the process with the RKNN error code on any failed step.
    """
    rknn = RKNN()

    print('--> config model')
    rknn.config(channel_mean_value='0 0 0 255',
                reorder_channel='0 1 2',
                batch_size=1,
                target_platform=target_platform)

    print('--> Loading model')
    # Output node names per number of prediction heads.
    heads = {
        1: ['predict_conv/BiasAdd'],
        2: ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd'],
        3: ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd', 'predict_conv_3/BiasAdd'],
    }
    if output_tensor_num not in heads:
        raise ValueError('invalid output tensor number ', output_tensor_num)
    output_tensor_names = heads[output_tensor_num]

    status = rknn.load_tensorflow(tf_pb=input_model,
                                  inputs=['image_input'],
                                  outputs=output_tensor_names,
                                  input_size_list=[model_input_shape + (3,)],
                                  predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if status != 0:
        print('Load failed!')
        exit(status)

    print('--> Building model')
    status = rknn.build(do_quantization=True,
                        dataset=dataset_file,
                        pre_compile=True)
    if status != 0:
        print('Build failed!')
        exit(status)

    print('--> Export RKNN model')
    status = rknn.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    rknn.release()
def rknn_convert(input_model, output_model, model_input_shape, dataset_file, target_platform):
    """Convert a TensorFlow classifier .pb to a quantized RKNN model.

    Args:
        input_model: path to the frozen TensorFlow graph.
        output_model: destination path for the .rknn file.
        model_input_shape: (height, width) tuple; 3 channels are appended.
        dataset_file: dataset list file for quantization calibration.
        target_platform: RKNN target platform string.

    Exits the process with the RKNN error code on any failed step.
    """
    converter = RKNN()

    print('--> config model')
    converter.config(channel_mean_value='0 0 0 255',
                     reorder_channel='0 1 2',
                     batch_size=1,
                     target_platform=target_platform)

    print('--> Loading model')
    status = converter.load_tensorflow(tf_pb=input_model,
                                       inputs=['image_input'],
                                       outputs=['dense/Softmax'],
                                       input_size_list=[model_input_shape + (3, )],
                                       predef_file=None)
    #ret = rknn.load_onnx(model=input_model)
    if status != 0:
        print('Load failed!')
        exit(status)

    print('--> Building model')
    status = converter.build(do_quantization=True,
                             dataset=dataset_file,
                             pre_compile=True)
    if status != 0:
        print('Build failed!')
        exit(status)

    print('--> Export RKNN model')
    status = converter.export_rknn(output_model)
    if status != 0:
        print('Export .rknn failed!')
        exit(status)

    converter.release()
def caffe2rknn(caffe_proto, caffe_weight, rknn_model):
    """Convert a Caffe model (prototxt + caffemodel) to a float RKNN model.

    Args:
        caffe_proto: path to the .prototxt network definition.
        caffe_weight: path to the .caffemodel weights blob.
        rknn_model: destination path for the exported .rknn file.

    Exits the process with the RKNN error code on any failed step.
    """
    print("start export")
    rknn = RKNN(verbose=True)
    # Fix: every return code was assigned and ignored, so a failed load,
    # build or export still printed "export finished".
    # NOTE(review): config() is called after load_caffe here; the toolkit
    # docs usually show config before load — order preserved, confirm.
    ret = rknn.load_caffe(model=caffe_proto,
                          proto="caffe",
                          blobs=caffe_weight)
    if ret != 0:
        print('Load Caffe model failed!')
        exit(ret)
    rknn.config(channel_mean_value='127.5 127.5 127.5 128.0',
                reorder_channel='2 1 0',
                #reorder_channel='0 1 2',
                #need_horizontal_merge=True
                )
    ret = rknn.build(do_quantization=False)
    #ret = rknn.build(do_quantization=True)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    ret = rknn.export_rknn(export_path=rknn_model)
    if ret != 0:
        print('Export rknn model failed!')
        exit(ret)
    print("export finished")
def load_init_model(rknn_path, device_id):
    """Load an .rknn model and init the runtime, locally or on a stick.

    Args:
        rknn_path: path to a .rknn model file.
        device_id: RK1808 device id; empty string means init on the host
            (simulator), which is slower.

    Returns:
        The initialized RKNN object, or None if runtime init failed
        (the RKNN context is released before returning).
    """
    # Create the RKNN object with verbose logging to a file.
    net = RKNN(verbose=True, verbose_file='verbose.log')
    net.load_rknn(rknn_path)

    print('--> Init runtime environment')
    if device_id == '':
        # Host-side init (slow).
        status = net.init_runtime()
        print('device_id')
    else:
        # Compute-stick passive mode, pinned to the given device.
        status = net.init_runtime(target='rk1808', device_id=device_id)

    if status != 0:
        print('Init runtime environment failed')
        net.release()
        return None
    return net
def main():
    """Run the emotion-recognition RKNN model on a sample image.

    Detects the largest face in ./data/image/happy.jpg, classifies its
    emotion with ./emotion.rknn, and prints the predicted emotion
    (Korean message) for any class whose score equals 1.
    """
    # Create the RKNN object and load the prebuilt model.
    net = RKNN(verbose=True)
    net.load_rknn('./emotion.rknn')
    print('--> load success')

    result = None

    # Read the input image and extract the largest detected face
    # (resized crop) plus its coordinates.
    input_image = cv2.imread('./data/image/happy.jpg', cv2.IMREAD_COLOR)
    detected_face, face_coor = format_image(input_image)

    if detected_face is not None:
        # Convert the 48x48 face crop to a (1, 2304) tensor as float32
        # (RKNN does not support float64).
        tensor = image_to_tensor(detected_face).astype(np.float32)

        print('--> Init runtime environment')
        status = net.init_runtime()
        if status != 0:
            print('Init runtime environment failed')

        # Run inference; result is the emotion prediction array.
        result = net.inference(inputs=[tensor])
        print('run success')
        result = np.array(result)

    if result is not None:
        # Seven emotion classes; report any class scored exactly 1.
        for idx in range(7):
            if result[0][0][idx] == 1:
                print('당신의 감정은 ' + EMOTIONS[idx] + '입니다.')
def load_model0(model_path, npu_id):
    """Load an .rknn model onto one of the attached NPU devices.

    Scans the last device list returned by list_devices(): ids starting
    with 'TS' map to npu_id 1, everything else to npu_id 0 (later matches
    overwrite earlier ones).

    Args:
        model_path: path to a .rknn model file.
        npu_id: 0 or 1, selecting which detected device to use.

    Returns:
        An initialized RKNN object; exits with the RKNN error code if
        runtime init fails.
    """
    net = RKNN()
    devs = net.list_devices()

    device_id_dict = {}
    for dev_id in devs[-1]:
        if dev_id[:2] != 'TS':
            device_id_dict[0] = dev_id
        if dev_id[:2] == 'TS':
            device_id_dict[1] = dev_id

    print('-->loading model : ' + model_path)
    net.load_rknn(model_path)

    print('--> Init runtime environment on: ' + device_id_dict[npu_id])
    status = net.init_runtime(device_id=device_id_dict[npu_id])
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return net
def __deal(self, model, post_func):
    """Serve inference over the instance's read/write fds via select().

    Loads *model* into a fresh RKNN runtime, then loops forever: receives
    frames on self.rfd, runs inference, post-processes with *post_func*,
    and sends results on self.wfd.

    Args:
        model: path to a .rknn model file.
        post_func: callable mapping raw inference outputs to the payload
            sent back to the client.
    """
    rknn = RKNN()
    # NOTE(review): load_rknn's return code is assigned but not checked;
    # only init_runtime failure aborts.
    ret = rknn.load_rknn(path=model)
    # init runtime environment
    logger.debug('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        logger.error('Init runtime environment failed')
        exit(ret)
    logger.debug('Init done')
    r_list = [self.rfd]
    w_list = [self.wfd]
    e_list = [self.rfd, self.wfd]
    while True:
        # select_timeout is a module-level constant (not visible here).
        fd_r_list, fd_w_list, fd_e_list = select.select(
            r_list, w_list, e_list, select_timeout)
        if not (fd_r_list or fd_w_list or fd_e_list):
            # select timed out with no ready fds; poll again.
            continue
        for rs in fd_r_list:
            if rs is self.rfd:
                decimg = self.__recieve_frame()
                # logger.debug('__recieve_frame: %d' % (len(decimg)))
                if decimg is None:
                    logger.error('decimg is None')
                    continue
                outputs = rknn.inference(inputs=[decimg])
                data = post_func(outputs)
        # NOTE(review): `data` carries over from a previous iteration if
        # the write fd is ready before a new frame arrives — on the very
        # first iteration this would raise NameError; confirm intended.
        for ws in fd_w_list:
            if ws is self.wfd:
                self.__send_result(data)
        for es in fd_e_list:
            logger.error("error fd list: %s" % (es))
    # NOTE(review): unreachable — the while True above never breaks, so
    # the RKNN context is never released by this path.
    rknn.release()
    logger.debug('__deal finish')
if (i + j) >= 5: break if value > 0: topi = '{}: {}\n'.format(index[j], value) else: topi = '-1: 0.0\n' top5_str += topi print(top5_str) if __name__ == '__main__': export_keras_model() # Create RKNN object rknn = RKNN() # pre-process config print('--> config model') rknn.config(mean_values=[[127.5, 127.5, 127.5]], std_values=[[127.5, 127.5, 127.5]], reorder_channel='0 1 2') print('done') # Load keras model print('--> Loading model') ret = rknn.load_keras(model=KERAS_MODEL_PATH) if ret != 0: print('Load keras model failed!') exit(ret) print('done') # Build model