# --- Export the built model to disk as an .rknn file ---
print('--> Export RKNN model')
ret = rknn.export_rknn('./mobilenet_v2.rknn')
if ret != 0:
    print('Export mobilenet_v2.rknn failed!')
    exit(ret)
print('done')

# Set inputs
# NOTE(review): cv2.imread returns BGR; the model presumably expects RGB,
# hence the explicit conversion — confirm against the model's preprocessing.
img = cv2.imread('./cat_224x224.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# --- Initialize the (simulator or device) runtime before inference ---
print('--> Init runtime environment')
ret = rknn.init_runtime()
if ret != 0:
    print('Init runtime environment failed')
    exit(ret)
print('done')

# Inference
print('--> Running model')
outputs = rknn.inference(inputs=[img])
show_outputs(outputs)
print('done')

# perf
print('--> Begin evaluate model performance')
perf_results = rknn.eval_perf(inputs=[img])
print('done')

# Free the RKNN context once all work is finished.
rknn.release()
# NOTE(review): this fragment begins mid-loop — the leading
# `iou = CalculateOverlap(...)` / `if iou >= 0.45:` statements belong inside a
# nested box-suppression loop (presumably `for j in range(i+1, vaildCnt)`)
# whose header, and the xmin0..ymax1 coordinate setup, are outside this view —
# do not reformat without the missing context. TODO confirm enclosing loops.
# What the visible code does:
#   * NMS step: boxes overlapping an already-kept box with IoU >= 0.45 are
#     invalidated by writing -1 into candidateBox[0][j].
#   * Draw step: for each surviving candidate, clamp the predicted box corners
#     to [0, 1], scale by INPUT_SIZE, and draw a randomly colored rectangle on
#     orig_img (predictions layout is [ymin, xmin, ymax, xmax] per box).
#   * The annotated image is written to "out.jpg", perf is evaluated on the
#     simulator, and the RKNN context is released.
iou = CalculateOverlap(xmin0, ymin0, xmax0, ymax0, xmin1, ymin1, xmax1, ymax1) if iou >= 0.45: candidateBox[0][j] = -1 # Draw result for i in range(0, vaildCnt): if candidateBox[0][i] == -1: continue n = candidateBox[0][i] xmin = max(0.0, min(1.0, predictions[0][n][1])) * INPUT_SIZE ymin = max(0.0, min(1.0, predictions[0][n][0])) * INPUT_SIZE xmax = max(0.0, min(1.0, predictions[0][n][3])) * INPUT_SIZE ymax = max(0.0, min(1.0, predictions[0][n][2])) * INPUT_SIZE # print("%d @ (%d, %d) (%d, %d) score=%f" % (topClassScoreIndex, xmin, ymin, xmax, ymax, topClassScore)) cv2.rectangle(orig_img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (random.random() * 255, random.random() * 255, random.random() * 255), 3) cv2.imwrite("out.jpg", orig_img) # Evaluate Perf on Simulator rknn.eval_perf(inputs=[img], is_print=True) # Release RKNN Context rknn.release()
# Abort if the runtime failed to initialize (ret was set by the
# init_runtime() call just above this fragment).
if ret != 0:
    print('Init runtime environment failed')
    exit(ret)
print('done')

# Inference
print('--> Running model')
outputs = rknn.inference(inputs=[visible_input, infrared_input])
print('done')
print('inference result: ', outputs)

# First output tensor is the fused image; inspect its value range.
img = np.array(outputs[0])
print(img.shape)
print('img.min = ')
print(np.min(img))
#img=img-np.min(img)
print(np.max(img))

# Scale to [0, 255] by the max value, then reshape to a 2-D grayscale image.
# NOTE(review): reshape uses (WIDTH, HEIGHT) ordering — verify this matches
# the model's output layout; numpy expects (rows, cols).
img = img / np.max(img) * 255
img = np.reshape(img, (INPUT_SIZE_WIDTH, INPUT_SIZE_HEIGHT))
print(img.shape)
print(visible_input.shape)
cv2.imwrite("out.jpg", img)

# Evaluate Perf on Simulator
rknn.eval_perf(inputs=[visible_input], is_print=True)

# Release RKNN Context
rknn.release()
def main():
    """Run the RKNN workflow described by a YAML config file.

    Reads the module-level ``yaml_file``, then performs, in order and as
    enabled by ``config['running']``: model load (dispatched through
    ``_model_load_dict`` by ``model_type``), build, export, runtime init,
    single-image inference, and performance evaluation.

    Exits the process with the RKNN API's return code on any failure.
    """
    with open(yaml_file, 'r') as F:
        # FIX: yaml.load() without an explicit Loader is deprecated (PyYAML
        # >= 5.1) and can execute arbitrary tags; safe_load suffices for a
        # plain configuration file.
        config = yaml.safe_load(F)
    # print('config is:')
    # print(config)

    model_type = config['running']['model_type']
    print('model_type is {}'.format(model_type))

    rknn = RKNN(verbose=True)

    print('--> config model')
    rknn.config(**config['config'])
    print('done')

    print('--> Loading model')
    # Dispatch to the framework-specific loader named in _model_load_dict.
    load_function = getattr(rknn, _model_load_dict[model_type])
    ret = load_function(**config['parameters'][model_type])
    if ret != 0:
        print('Load mobilenet_v2 failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    ####
    # print('hybrid_quantization')
    # ret = rknn.hybrid_quantization_step1(dataset=config['build']['dataset'])

    # An already-converted .rknn model needs no build step.
    if model_type != 'rknn':
        print('--> Building model')
        ret = rknn.build(**config['build'])
        if ret != 0:
            print('Build mobilenet_v2 failed!')
            exit(ret)
    else:
        print('--> skip Building model step, cause the model is already rknn')

    if config['running']['export'] is True:
        print('--> Export RKNN model')
        ret = rknn.export_rknn(**config['export_rknn'])
        if ret != 0:
            # FIX: original printed 'Init runtime environment failed' here —
            # a copy-paste error that mislabeled export failures.
            print('Export RKNN model failed!')
            exit(ret)
    else:
        print('--> skip Export model')

    # Runtime init is only needed when we actually run or profile the model.
    if (config['running']['inference'] is True) or (config['running']['eval_perf'] is True):
        print('--> Init runtime environment')
        ret = rknn.init_runtime(**config['init_runtime'])
        if ret != 0:
            print('Init runtime environment failed')
            exit(ret)

        print('--> load img')
        img = cv2.imread(config['img']['path'])
        print('img shape is {}'.format(img.shape))
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        inputs = [img]

        if config['running']['inference'] is True:
            print('--> Running model')
            config['inference']['inputs'] = inputs
            # print(config['inference'])
            outputs = rknn.inference(inputs)
            # outputs = rknn.inference(config['inference'])
            print('len of output {}'.format(len(outputs)))
            print('outputs[0] shape is {}'.format(outputs[0].shape))
            print(outputs[0][0][0:2])
        else:
            print('--> skip inference')

        if config['running']['eval_perf'] is True:
            print('--> Begin evaluate model performance')
            config['inference']['inputs'] = inputs
            perf_results = rknn.eval_perf(inputs=[img])
        else:
            print('--> skip eval_perf')
    else:
        print('--> skip inference')
        print('--> skip eval_perf')