def main():
    """Run the MTCNN face-detection demo on the default camera.

    Loads the PNet pyramid, RNet and ONet RKNN models, then detects and
    draws faces on each captured frame until ESC is pressed.
    """
    # MTCNN hyper-parameters: minimum face size, per-stage score
    # thresholds, and the image-pyramid scale factor.
    minsize = 20
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709

    pnet_rknn_list = init_pnet()
    rnet_rknn = RKNN()
    onet_rknn = RKNN()
    rnet_rknn.load_rknn('./RNet.rknn')
    onet_rknn.load_rknn('./ONet.rknn')
    ret = rnet_rknn.init_runtime()
    if ret != 0:
        print('Init rnet runtime environment failed')
        exit(ret)
    ret = onet_rknn.init_runtime()
    if ret != 0:
        print('Init onet runtime environment failed')
        exit(ret)

    # Re-open the standard streams unbuffered-ish so timing prints appear live.
    sys.stdout = open('/dev/stdout', 'w')
    sys.stderr = open('/dev/stderr', 'w')

    capture = cv2.VideoCapture(0)
    try:
        while True:
            tic()
            ret, img = capture.read()
            if not ret:
                # Dropped frame; try again (was `if ret == True:` wrapping).
                continue
            img = cv2.resize(img, (450, 344))
            # detect_face expects RGB input; OpenCV captures BGR.
            img_matlab = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
            toc()
            print("capture--------------------")

            tic()
            boundingboxes, points = detect_face(img_matlab, minsize,
                                                pnet_rknn_list, rnet_rknn,
                                                onet_rknn, threshold, False,
                                                factor)
            toc()

            tic()
            img = drawBoxes(img, boundingboxes)
            cv2.imshow('img', img)
            c = cv2.waitKey(5) & 0xff
            if c == 27:  # ESC quits the demo
                break
            toc()
            print("imshow--------------------")
    finally:
        # Release all NPU contexts and the camera even if the loop raised.
        for pnet in pnet_rknn_list:
            pnet.release()
        rnet_rknn.release()
        onet_rknn.release()
        cv2.destroyAllWindows()
        capture.release()
def load_model():
    """Load the MobileNetV2 RKNN model and init its runtime on rk3399pro.

    Returns the ready-to-infer RKNN object; exits the process on failure.
    """
    rknn = RKNN()
    print('-->loading model')
    rknn.load_rknn('./mobilenet_v2.rknn')
    print('-->Init runtime environment')
    ret = rknn.init_runtime(target='rk3399pro')
    if ret != 0:
        # BUGFIX: message previously misspelled 'enviroment'.
        print('Init runtime environment failed')
        exit(ret)
    return rknn
def load_model():
    """Load the OpenPose RKNN model and bring up its runtime environment."""
    model = RKNN()
    print('-->loading model')
    model.load_rknn('./pose_deploy_linevec_pre_compile.rknn')
    print('loading model done')
    print('--> Init runtime environment')
    status = model.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('!!!!!!!!!!!!!!!!!_____________________done___________________!!!!!!!!!!!!!!!!!!!!')
    return model
def load_model():
    """Load the YOLOv3-tiny RKNN model and initialize its runtime."""
    detector = RKNN()
    print('-->loading model')
    # The full-size model ('./yolov3.rknn') can be swapped in here.
    detector.load_rknn('./yolov3_tiny.rknn')
    print('loading model done')
    print('--> Init runtime environment')
    status = detector.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return detector
def init_pnet():
    """Load and initialize the nine pyramid-scale PNet RKNN models.

    Model file names are derived from the first two entries of each
    PNET_PYRAMID row. Returns the list of initialized RKNN objects;
    exits the process if any runtime init fails.
    """
    # BUGFIX/cleanup: dropped dead `i = 0` and the useless `i += 1`
    # (the `for` loop already rebinds i), and stopped shadowing the
    # builtin `list`.
    pnet_list = []
    for scale in PNET_PYRAMID[:9]:
        rknn_name = "PNet_%d_%d.rknn" % (scale[0], scale[1])
        pnet_rknn = RKNN()
        pnet_rknn.load_rknn(rknn_name)
        ret = pnet_rknn.init_runtime()
        if ret != 0:
            print('Init pnet runtime environment failed')
            exit(ret)
        pnet_list.append(pnet_rknn)
    return pnet_list
def load_model():
    """Load `rknn_model` (module-level path) and init runtime on rk1808.

    Prints elapsed wall-clock time for both steps; raises on init failure.
    """
    rknn = RKNN()
    print('-->loading model')
    t0 = timer()
    rknn.load_rknn(rknn_model)
    print('loading model done: ', timer() - t0)
    print('--> Init runtime environment')
    t0 = timer()
    if rknn.init_runtime(target='rk1808') != 0:
        raise Exception('Init runtime environment failed')
    print('init done: ', timer() - t0)
    return rknn
def load_model(model_name):
    """Load the RKNN model at `model_name` and initialize its runtime.

    Exits the process if runtime initialization fails.
    """
    # Create the RKNN object and pull in the prebuilt model file.
    net = RKNN()
    print('-->loading model')
    net.load_rknn(model_name)
    print('loading model done')
    # Bring up the inference runtime.
    print('--> Init runtime environment')
    status = net.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return net
def test_rknn():
    """Run the LPRNet plate-recognition model on one sample image.

    Loads lprnet.rknn, inits the rk1808 runtime, decodes and prints the
    predicted labels, then releases the NPU context.
    """
    from rknn.api import RKNN
    rknn = RKNN()
    # Load rknn model
    print('--> Load RKNN model')
    ret = rknn.load_rknn('lprnet.rknn')
    if ret != 0:
        # BUGFIX: message previously said 'Export model failed!' for a
        # *load* failure.
        print('Load RKNN model failed!')
        exit(ret)
    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime(target='rk1808')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
    # Inference
    print('--> Running model')
    image = cv2.imread('data/eval/000256.png')
    outputs = rknn.inference(inputs=[image])
    preds = outputs[0]
    # decode() maps raw network scores to characters via the CHARS table.
    labels, pred_labels = decode(preds, CHARS)
    print(labels)
    print('done')
    rknn.release()
def load_model():
    """Load the digital-gesture RKNN model and initialize its runtime."""
    gesture_net = RKNN()
    print('-->loading model')
    gesture_net.load_rknn('./digital_gesture.rknn')
    print('loading model done')
    print('--> Init runtime environment')
    # NOTE(review): `host='rk3399pro'` — sibling loaders use `target=`;
    # confirm this keyword against the RKNN-Toolkit API in use.
    status = gesture_net.init_runtime(host='rk3399pro')
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return gesture_net
def main():
    """Classify the emotion in a sample image with the emotion RKNN model.

    Detects the largest face, runs inference on it, and prints the
    predicted emotion (Korean message preserved from the original).
    """
    rknn = RKNN(verbose=True)
    # Directly load the prebuilt RKNN model.
    rknn.load_rknn('./emotion.rknn')
    print('--> load success')
    result = None
    # Read the input image.
    input_image = cv2.imread('./data/image/happy.jpg', cv2.IMREAD_COLOR)
    # format_image returns the resized largest detected face and its coords.
    detected_face, face_coor = format_image(input_image)
    if detected_face is not None:
        # Convert the 48x48 face to a (1, 2304) float32 tensor
        # (RKNN does not support float64).
        tensor = image_to_tensor(detected_face).astype(np.float32)
        print('--> Init runtime environment')
        ret = rknn.init_runtime()
        if ret != 0:
            print('Init runtime environment failed')
            # BUGFIX: previously only printed and then ran inference anyway.
            exit(ret)
        result = rknn.inference(inputs=[tensor])
        print('run success')
        # Convert the returned list to an array of emotion predictions.
        result = np.array(result)
    if result is not None:
        # The model emits a 7-way one-hot emotion vector.
        for i in range(7):
            if result[0][0][i] == 1:
                print('당신의 감정은 ' + EMOTIONS[i] + '입니다.')
def load_model(modle_path):
    """Load the RKNN model at `modle_path` and init its runtime.

    (Parameter name keeps the original 'modle' spelling: it is part of
    the public interface for keyword callers.)
    """
    net = RKNN()
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    print('-->loading model')
    net.load_rknn(modle_path)
    print('loading model done')
    print('--> Init runtime environment')
    status = net.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    return net
def load_model0(model_path, npu_id):
    """Load an RKNN model onto a specific NPU device.

    Enumerates attached devices and maps them by id prefix: slot 0 for
    non-'TS' ids, slot 1 for 'TS' ids. `npu_id` (0 or 1) selects which
    device the runtime is initialized on.

    NOTE(review): raises KeyError if no device with the requested prefix
    is attached — presumably both device kinds are expected; verify.
    """
    rknn = RKNN()
    devs = rknn.list_devices()
    device_id_dict = {}
    # Cleanup: dropped the unused `enumerate` index from the original.
    for dev_id in devs[-1]:
        if dev_id[:2] != 'TS':
            device_id_dict[0] = dev_id
        if dev_id[:2] == 'TS':
            device_id_dict[1] = dev_id
    print('-->loading model : ' + model_path)
    rknn.load_rknn(model_path)
    print('--> Init runtime environment on: ' + device_id_dict[npu_id])
    ret = rknn.init_runtime(device_id=device_id_dict[npu_id])
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
    return rknn
def init_rknn(rknn_path):
    """Load an RKNN model and init the rk1808 runtime with perf_debug on.

    Exits the process if either step fails.
    """
    rknn = RKNN(verbose=True)
    ret = rknn.load_rknn(rknn_path)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('starting init runtime ...')
    # BUGFIX: the init_runtime return code was previously ignored
    # (the sibling init_rknn in this file checks it).
    ret = rknn.init_runtime(target='rk1808', perf_debug=True)
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    return rknn
def load_init_model(rknn_path, device_id):
    """Create an RKNN object, load `rknn_path`, and init the runtime.

    With an empty `device_id` the runtime is initialized on the host
    (slow); otherwise on the rk1808 stick in passive mode. Returns the
    RKNN object, or None (after releasing it) if init fails.
    """
    net = RKNN(verbose=True, verbose_file='verbose.log')
    net.load_rknn(rknn_path)
    print('--> Init runtime environment')
    if device_id == '':
        status = net.init_runtime()
        print('device_id')
    else:
        status = net.init_runtime(target='rk1808', device_id=device_id)
    if status != 0:
        print('Init runtime environment failed')
        # Free the NPU context before signalling failure to the caller.
        net.release()
        return None
    return net
def init_pnet():
    """Initialize the two pyramid-scale PNet models (406x207 and 289x147).

    Returns them in a list, odd scale first; exits with the runtime's
    error code if either init fails.
    """
    pnets = []
    # Same load/init sequence for both scales, in the original order.
    for width, height in ((406, 207), (289, 147)):
        model_name = "PNet_%d_%d.rknn" % (width, height)
        net = RKNN()
        net.load_rknn(model_name)
        status = net.init_runtime()
        if status != 0:
            exit(status)
        pnets.append(net)
    return pnets
class RecModel():
    """Face-recognition model backed by an RKNN runtime.

    Extracts 128-d(?) embeddings from 112x112 face crops and matches
    them against a precomputed facebank.
    """

    def __init__(self,args):
        """Store config from `args` and load/init the RKNN model.

        `args` must provide model_path, image_size and threshold.
        """
        self.model_path = args.model_path
        self.input_size = args.image_size
        self.threshold = args.threshold
        self.rknn = RKNN()
        self.load_model()

    def load_model(self):
        """Load the RKNN model and init the runtime on a fixed rk3399pro
        device; exits the process on failure."""
        ret = self.rknn.load_rknn(self.model_path)
        if ret != 0:
            print('load rknn model failed')
            exit(ret)
        print('load model success')
        # NOTE(review): device_id is hard-coded to one specific board.
        ret = self.rknn.init_runtime(target="rk3399pro", device_id="TD033101190400338")
        if ret != 0:
            print('Init runtime environment failed')
            exit(ret)
        print('init runtime success')
        version = self.rknn.get_sdk_version()
        print(version)
        # Inference
        print('--> Running model')

    def extract_features(self,img):
        """Return the L2-normalized embedding for a BGR face image.

        The image is resized to 112x112 (AREA when shrinking, CUBIC when
        growing) and converted to RGB before inference.
        """
        if img.shape[0] > 112:
            img = cv2.resize(img, (112, 112), interpolation=cv2.INTER_AREA)
        if img.shape[0] < 112:
            img = cv2.resize(img, (112, 112), interpolation=cv2.INTER_CUBIC)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        outputs = self.rknn.inference(inputs=[img])[0]
        # sklearn row-normalization, flattened to a 1-D vector.
        embedding = preprocessing.normalize(outputs).flatten()
        return embedding

    def load_facebank(self):
        """Load precomputed embeddings and matching names from .npy files."""
        self.features = np.load('npy/facebank_mtcnn_rknn_128.npy')
        self.names = np.load('npy/names_mtcnn_rknn_128.npy')

    def compare_feature(self,img):
        """Match `img` against the facebank by squared L2 distance.

        Returns (names, distances) for the best match, or
        (array([['None']]), array([0])) when the best distance exceeds
        the threshold.

        NOTE(review): `if min_idx == -1:` compares a numpy array to a
        scalar — this only behaves as intended when min_idx has a single
        element (one probe image); confirm callers never batch.
        """
        feature = self.extract_features(img)
        # Broadcasted difference: probe embedding vs every bank entry.
        diff = np.expand_dims(feature,2) - np.expand_dims(np.transpose(self.features,[1,0]), 0)
        dist = np.sum(np.power(diff, 2),axis=1)
        minimum = np.min(dist, axis=1)
        min_idx = np.argmin(dist,axis=1)
        min_idx[minimum > self.threshold] = -1 # if no match, set idx to -1
        if min_idx == -1:
            return (np.array([['None']]),np.array([0]))
        else:
            return self.names[min_idx], minimum
def load_rknn_model(PATH):
    """Load the RKNN model at PATH and init the default runtime.

    Exits the process if loading or runtime init fails.
    """
    net = RKNN()
    print('--> Loading model')
    if net.load_rknn(PATH) != 0:
        print('load rknn model failed')
        exit(net.load_rknn(PATH))
    print('done')
    status = net.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return net
def init_rknn(rknn_path):
    """Load `rknn_path` and init the runtime on an rk1808 target.

    Exits the process if either step fails.
    """
    net = RKNN()
    status = net.load_rknn(rknn_path)
    if status != 0:
        print('Load RKNN model failed')
        exit(status)
    print('starting init runtime ...')
    # A specific stick can be pinned with device_id='TS018080000000053'.
    status = net.init_runtime(target='rk1808')
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return net
def load_rknn_model(PATH):
    """Load an RKNN model and init the runtime in precompile mode.

    Targets a fixed device id and requests rknn2precompile so the model
    can later be exported in precompiled form. Exits on failure.
    """
    net = RKNN()
    print('--> Loading model')
    status = net.load_rknn(PATH)
    if status != 0:
        print('load rknn model failed')
        exit(status)
    print('done')
    status = net.init_runtime(device_id='TS018083200400178', rknn2precompile=True)
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')
    return net
def __deal(self, model, post_func):
    """Serve inference requests over the object's read/write fds.

    Loads `model` into an RKNN runtime, then loops on select(): frames
    received on self.rfd are run through the model, post-processed by
    `post_func`, and the result is sent back on self.wfd.
    """
    rknn = RKNN()
    ret = rknn.load_rknn(path=model)
    if ret != 0:
        # BUGFIX: the load return code was previously ignored.
        logger.error('Load RKNN model failed')
        exit(ret)
    # init runtime environment
    logger.debug('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        logger.error('Init runtime environment failed')
        exit(ret)
    logger.debug('Init done')
    r_list = [self.rfd]
    w_list = [self.wfd]
    e_list = [self.rfd, self.wfd]
    while True:
        fd_r_list, fd_w_list, fd_e_list = select.select(
            r_list, w_list, e_list, select_timeout)
        if not (fd_r_list or fd_w_list or fd_e_list):
            # select timed out with no activity.
            continue
        for rs in fd_r_list:
            if rs is self.rfd:
                decimg = self.__recieve_frame()
                if decimg is None:
                    logger.error('decimg is None')
                    continue
                outputs = rknn.inference(inputs=[decimg])
                data = post_func(outputs)
        for ws in fd_w_list:
            if ws is self.wfd:
                # NOTE(review): `data` is only bound after the first
                # successful read — a writable fd before any frame would
                # raise NameError; confirm the protocol guarantees order.
                self.__send_result(data)
        for es in fd_e_list:
            logger.error("error fd list: %s" % (es))
    # NOTE(review): unreachable — the loop above has no break, so the
    # runtime is never released (as in the original).
    rknn.release()
    logger.debug('__deal finish')
# --- script fragment: build, export, reload and init resnet_18 ---
print('--> Building model')
ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
if ret != 0:
    print('Build pytorch failed!')
    exit(ret)
print('done')
# Export rknn model
print('--> Export RKNN model')
ret = rknn.export_rknn('./resnet_18.rknn')
if ret != 0:
    print('Export resnet_18.rknn failed!')
    exit(ret)
print('done')
# Reload the file just exported.
ret = rknn.load_rknn('./resnet_18.rknn')  # NOTE(review): return code not checked
# Set inputs (model expects RGB, OpenCV reads BGR).
img = cv2.imread('./space_shuttle_224.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# init runtime environment
print('--> Init runtime environment')
ret = rknn.init_runtime()
if ret != 0:
    print('Init runtime environment failed')
    exit(ret)
print('done')
# Inference
print('--> Running model')
# (fragment: `perfs` is produced by code above this chunk)
perfs = 'perfs: {}\n'.format(perfs)
print(perfs)


def softmax(x):
    # Softmax over a score vector; uses builtin sum, so x is assumed 1-D.
    return np.exp(x) / sum(np.exp(x))


if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()
    # Load RKNN model
    print('--> Load RKNN model')
    ret = rknn.load_rknn('./mnasnet0_5.rknn')
    if ret != 0:
        print('Load mnasnet0_5.rknn failed!')
        exit(ret)
    print('done')
    # Set inputs (model expects RGB).
    img = cv2.imread('./space_shuttle_224.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    # ret = rknn.init_runtime(target='rk1808', device_id='1808s1')
    if ret != 0:
        print('Init runtime environment failed')
# --- script fragment: convert deepfusion TFLite -> RKNN, prepare inputs ---
print('--> Loading model')
ret = rknn.load_tflite(model='./deepfusion_addition.tflite')
print('done')
# Build Model
print('--> Building model')
rknn.build(do_quantization=True, dataset='./dataset.txt')  # NOTE(review): return code not checked
print('done')
# Export RKNN Model
ret = rknn.export_rknn('./deepfusion.rknn')
# Direct Load RKNN Model
rknn.load_rknn('./deepfusion.rknn')
# Set inputs: both streams are read as single-channel grayscale.
infrared_input_temp = cv2.imread(infrared, cv2.IMREAD_GRAYSCALE)
#infrared_input = cv2.cvtColor(infrared_input_temp, cv2.COLOR_BGR2RGB)
infrared_input = cv2.resize(infrared_input_temp, (INPUT_SIZE_WIDTH, INPUT_SIZE_HEIGHT), interpolation=cv2.INTER_CUBIC)
visible_input_temp = cv2.imread(visible, cv2.IMREAD_GRAYSCALE)
#visible_input = cv2.cvtColor(visible_input_temp, cv2.COLOR_BGR2RGB)
visible_input = cv2.resize(visible_input_temp, (INPUT_SIZE_WIDTH, INPUT_SIZE_HEIGHT), interpolation=cv2.INTER_CUBIC)
# init runtime environment
print('--> Init runtime environment')
    # (fragment: tail of a box-prior loading function whose `def` is above
    # this chunk — reads NUM_RESULTS*4 prior values from file handle `fp`.)
    fp.close()
    box_priors = np.array(box_priors_)
    box_priors = box_priors.reshape(4, NUM_RESULTS)
    return box_priors


if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()
    # Direct Load RKNN Model
    rknn.load_rknn('./ssd_mobilenet_v1_coco.rknn')
    # Set inputs: square-resize to the SSD input size, in RGB.
    orig_img = cv2.imread('./road.bmp')
    img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (INPUT_SIZE, INPUT_SIZE), interpolation=cv2.INTER_CUBIC)
    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
    # Inference
        # (fragment: tail of a top-5 printing loop started above this chunk;
        # `value`, `index`, `j` and `top5_str` come from that loop.)
        if value > 0:
            topi = "{}: {}\n".format(index[j], value)
        else:
            # Non-positive score: emit a placeholder entry.
            topi = "-1: 0.0\n"
        top5_str += topi
    print("top5_str:`{}`".format(top5_str))


if __name__ == "__main__":
    # Create RKNN object
    rknn = RKNN()
    img_height = 299
    # Direct Load RKNN Model
    print("--> Loading RKNN model")
    ret = rknn.load_rknn("./inception_v3.rknn")
    if ret != 0:
        print("Load inception_v3.rknn failed!")
        exit(ret)
    # Set inputs: InceptionV3 takes 299x299.
    img = cv2.imread("./data/beaver.jpg")
    img = cv2.resize(img, dsize=(img_height, img_height), interpolation=cv2.INTER_CUBIC)
    # This can opt out if "reorder_channel" is set to "2 1 0"
    # rknn.config() in `convert_rknn.py`
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # init runtime environment
              # (fragment: these are the trailing kwargs of a PIL draw call
              # started above this chunk.)
              fill='black', font=font)
    # Copy the PIL rendering back into the OpenCV buffer and save it.
    print('write output image: {}{}_quant.jpg'.format(output_dir, name))
    np.copyto(img, np.array(img_pil))
    cv2.imwrite("{}{}_quant.jpg".format(output_dir, name), img)
    print('write output image finished.')


if __name__ == '__main__':
    # Create RKNN object
    rknn = RKNN()
    #print('--> Load RKNN Model')
    ret = rknn.load_rknn('./ssd_mobilenet_v2.rknn')
    if ret != 0:
        print('load rknn model failed')
        exit(ret)
    print('done')
    # Set inputs (model expects RGB).
    img = cv2.imread('./dog_bike_car_300x300.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
# --- script fragment: convert a TF LSTM model and load the stock CSV ---
rknn.load_tensorflow(tf_pb='./freeze.pb', inputs=['Placeholder'], outputs=['fully_connected/Identity'], input_size_list=[[28, 6]])
print('done')
# Build Model (no quantization for this regression model).
print('--> Building model')
rknn.build(do_quantization=False)
print('done')
# Export RKNN Model
rknn.export_rknn('./MTM_LSTM_RKNN.rknn')
# Direct Load RKNN Model
rknn.load_rknn('./MTM_LSTM_RKNN.rknn')
stock_file_name = 'AAPL_1m.csv'
encoding = 'euc-kr'
names = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
# Read and Delete Axis: keep the last date, drop the Date column, and
# take the numeric rows (skipping the header row) as float32.
raw_dataframe = pd.read_csv(stock_file_name, names=names, encoding=encoding)
today = raw_dataframe.values[-1, 0]
del raw_dataframe['Date']
storage = raw_dataframe.values[1:].astype(np.float32)
""" pandas 안쓰는 방법 storage = np.genfromtxt(stock_file_name, encoding='euc-kr', delimiter=',')
    # (fragment: usage message inside an argv-count check started above.)
    print('Such as: python {} mobilenet_v1.rknn mobilenet_v1.hw.rknn'.format(
        sys.argv[0]))
    exit(1)

from rknn.api import RKNN

# argv[1]: source model; argv[2]: destination precompiled model.
orig_rknn = sys.argv[1]
hw_rknn = sys.argv[2]
# Create RKNN object
rknn = RKNN()
# Load rknn model
print('--> Loading RKNN model')
ret = rknn.load_rknn(orig_rknn)
if ret != 0:
    print('Load RKNN model failed!')
    exit(ret)
print('done')
# Init runtime environment
print('--> Init runtime environment')
# Note: you must set rknn2precompile=True when call rknn.init_runtime()
ret = rknn.init_runtime(target='rk1808', rknn2precompile=True)
if ret != 0:
    print('Init runtime environment failed')
    exit(ret)
print('done')
    # (fragment: build branch of an `if NEED_BUILD_MODEL`-style check
    # started above this chunk.)
    if ret != 0:
        print('Build onnx failed!')
        exit(ret)
    print('done')
    # Export rknn model
    print('--> Export RKNN model {}'.format(RKNN_MODEL_PATH))
    ret = rknn.export_rknn(RKNN_MODEL_PATH)
    if ret != 0:
        print('Export RKNN model failed!')
        exit(ret)
    print('done')
else:
    # Direct load rknn model
    print('Loading RKNN model {}'.format(RKNN_MODEL_PATH))
    ret = rknn.load_rknn(RKNN_MODEL_PATH)
    if ret != 0:
        print('load rknn model failed.')
        exit(ret)
    print('done')

# Conversion-only mode: stop before touching any runtime.
if not NEED_RUN_MODEL:
    rknn.release()
    exit(0)

a = time.time()  # start of runtime-init timing
print('--> init runtime')
if NPU:
    ret = rknn.init_runtime(target='rk3399pro', device_id='2YJLMNA67N')
else:
    ret = rknn.init_runtime()
def trackAll(self, start_video_num, pause_val):
    """Track the objects in the video.

    Runs the regressor (via an RKNN runtime) over every video from
    `start_video_num` onward, drawing ground-truth boxes in white and
    tracked boxes in blue, and either displaying the frames or writing
    them to result<N>.avi under self.save_path.
    """
    videos = self.videos
    objRegressor = self.regressor
    objTracker = self.tracker
    count = 0
    rknn = RKNN(verbose=True, verbose_file='mono-track/rknn.log')
    # load rknn model
    print('--> Loading rknn model')
    rknn.load_rknn(objRegressor.rknn_model)
    print('done')
    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
    # NOTE(review): rknn.release() is never called in this method.
    for i in range(start_video_num, len(videos)):
        video_frames = videos[i].all_frames
        annot_frames = videos[i].annotations
        # Only track frames that have both an image and an annotation.
        num_frames = min(len(video_frames), len(annot_frames))
        first_frame = cv2.imread(videos[i].all_frames[0])
        print("frame shape:", first_frame.shape)
        # VideoWriter wants (width, height).
        self.fream_size = (first_frame.shape[1], first_frame.shape[0])
        vw = cv2.VideoWriter(
            os.path.join(self.save_path, 'result{}.avi'.format(count)),
            cv2.VideoWriter_fourcc(*'MJPG'), 30.0, self.fream_size, True)
        # Get the first frame of this video with the intial ground-truth bounding box
        frame_0 = video_frames[0]
        bbox_0 = annot_frames[0].bbox
        sMatImage = cv2.imread(frame_0)
        objTracker.init(sMatImage, bbox_0, objRegressor)
        for j in range(1, num_frames):
            frame = video_frames[j]
            sMatImage = cv2.imread(frame)
            sMatImageDraw = sMatImage.copy()
            bbox = annot_frames[j].bbox
            # Ground truth in white. OpenCV 2's rectangle draws in place;
            # newer versions return the image.
            if opencv_version == '2':
                cv2.rectangle(sMatImageDraw, (int(bbox.x1), int(bbox.y1)),
                              (int(bbox.x2), int(bbox.y2)),
                              (255, 255, 255), 2)
            else:
                sMatImageDraw = cv2.rectangle(sMatImageDraw,
                                              (int(bbox.x1), int(bbox.y1)),
                                              (int(bbox.x2), int(bbox.y2)),
                                              (255, 255, 255), 2)
            # Run the tracker/regressor on the current frame.
            bbox = objTracker.track(sMatImage, objRegressor, rknn)
            print("track_bbox:[{}, {}, {}, {}]".format(
                bbox.x1, bbox.y1, bbox.x2, bbox.y2))
            print("frame shape", sMatImageDraw.shape)
            # Tracked box in blue (BGR (255, 0, 0)).
            if opencv_version == '2':
                cv2.rectangle(sMatImageDraw, (int(bbox.x1), int(bbox.y1)),
                              (int(bbox.x2), int(bbox.y2)), (255, 0, 0), 2)
            else:
                sMatImageDraw = cv2.rectangle(sMatImageDraw,
                                              (int(bbox.x1), int(bbox.y1)),
                                              (int(bbox.x2), int(bbox.y2)),
                                              (255, 0, 0), 2)
            # Display when no save path is configured, otherwise record.
            if not self.save_path:
                cv2.imshow('Results', sMatImageDraw)
                cv2.waitKey(10)
            else:
                vw.write(sMatImageDraw)
        vw.release()
        print("saving result{}.avi in {}".format(count, self.save_path))
        count += 1