def run():
    """Entry point: spawn one worker thread per configured IPC camera."""
    # Initialize the framework; the verification value must match the one
    # entered when the skill was created.
    hilens.init("helmet")

    # Load the skill configuration and validate the required keys.
    skill_cfg = hilens.get_skill_config()
    required = ('server_url', 'IPC_address')
    if skill_cfg is None or any(key not in skill_cfg for key in required):
        hilens.error("skill config not correct")
        return

    # POST server address; multiple IPC addresses are separated by ';'.
    server_url = skill_cfg['server_url']
    camera_list = skill_cfg['IPC_address'].split(';')

    # One independent worker thread per IPC camera.
    workers = [
        threading.Thread(target=camera_thread, args=(address, server_url))
        for address in camera_list
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    hilens.terminate()
def camera_thread(camera_address, server_url):
    """Worker loop for one IPC camera: detect missing helmets, POST alerts.

    Args:
        camera_address: RTSP/IPC address of the camera to read from.
        server_url: HTTP endpoint that receives the alert image and boxes.
    """
    # Attach to the IPC camera stream.
    camera = hilens.VideoCapture(camera_address)

    # Load the helmet-detection model shipped with the skill.
    helmet_model_path = hilens.get_model_dir() + "helmet_template_model.om"
    helmet_model = hilens.Model(helmet_model_path)

    while True:
        # Read one frame (YUV NV21 format).
        input_yuv = camera.read()
        if input_yuv is None:
            # Frame grab failed (stream hiccup); back off and retry instead
            # of crashing the thread inside cv2.cvtColor.
            hilens.error("read frame failed")
            time.sleep(15)
            continue

        # Preprocess: convert to RGB and resize to the model input size.
        img_rgb = cv2.cvtColor(input_yuv, cv2.COLOR_YUV2RGB_NV21)
        img_preprocess, img_w, img_h = preprocess(img_rgb)

        # Run inference and post-process the output into detection boxes.
        output = helmet_model.infer([img_preprocess.flatten()])
        bboxes = get_result(output, img_w, img_h)

        # Check whether anyone in the frame is not wearing a helmet.
        if no_helmet(bboxes):
            # Draw the detection boxes on the RGB image.
            img_rgb = draw_boxes(img_rgb, bboxes)
            # Send the alert via POST; a failed send must not kill the loop.
            try:
                post_msg(server_url, img_rgb, bboxes)
            except Exception as e:
                hilens.error("post data failed!")
                print("Reason : ", e)

        # Wait 15 seconds before the next detection to avoid flooding the
        # server (the original comment said 5s but the code sleeps 15s —
        # kept at 15 to preserve behavior).
        time.sleep(15)
def sendData(uri, res, timeout=10):
    """POST the detection results *res* as JSON to *uri*.

    Args:
        uri: destination URL for the POST request.
        res: list of detection items; each item's confidence at index 5 is
            reformatted IN PLACE from float to a 3-decimal string.
        timeout: seconds before the request is aborted (new, defaulted, so
            existing callers are unaffected); without it a dead server
            would block the caller forever.
    """
    for item in res:
        # Convert probability from float to a fixed-precision string.
        item[5] = '%.3f' % item[5]
    try:
        requests.post(uri, json.dumps(res).encode('UTF-8'), timeout=timeout)
    except ConnectionError as e:
        hilens.error('Failed to send data, connection error:{}'.format(e))
    except Timeout as e:
        hilens.error('Failed to send data, connection timeout:{}'.format(e))
def image_test():
    """Offline self-test: run inference on a bundled sample image."""
    # An empty verification value is passed for local testing.
    if hilens.init("") != 0:
        hilens.error("Failed to initialize HiLens")
        return
    sample_path = './flight_test1.jpg'
    # NOTE(review): model_path is assumed to be defined at module level
    # elsewhere in this file — confirm before reuse.
    test_img(sample_path, model_path)
    hilens.terminate()
def run(work_space):
    """Start the inference worker and the listener thread, then park forever.

    Args:
        work_space: working directory handed to the inference thread.
    """
    hilens.error("start!!!!!!!!!!!!!!")
    # Launch the two worker threads.
    try:
        _thread.start_new_thread(run_inner, (work_space, ))
        _thread.start_new_thread(start_listen, ())
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; message preserved byte-for-byte.
        print("Error: 无法启动线程")
    # Keep the main thread alive. Sleeping replaces the original
    # `while 1: pass` busy-wait, which burned a full CPU core; the
    # `hilens.error("end...")` after the loop was unreachable and is dropped.
    while True:
        time.sleep(1)
def run():
    """Gesture skill main loop: detect gestures, show on HDMI, upload/POST."""
    # Configure the system log level.
    hilens.set_log_level(hilens.ERROR)
    # Initialize the framework; the verification value must match the one
    # entered when the skill was created.
    hilens.init("gesture")

    # Load the gesture-detection model shipped with the skill.
    gesture_model_path = hilens.get_model_dir() + "gesture_template_model.om"
    gesture_model = hilens.Model(gesture_model_path)

    # Built-in camera and HDMI display.
    camera = hilens.VideoCapture()
    display_hdmi = hilens.Display(hilens.HDMI)

    # Timestamp of the last OBS upload and the minimum interval between uploads.
    last_upload_time = 0
    upload_duration = 5

    # Load the skill configuration and validate the required key.
    skill_cfg = hilens.get_skill_config()
    if skill_cfg is None or 'server_url' not in skill_cfg:
        hilens.error("server_url not configured")
        return

    while True:
        # Read one frame (YUV NV21 format).
        input_yuv = camera.read()

        # Preprocess: convert to RGB and resize to the model input size.
        img_rgb = cv2.cvtColor(input_yuv, cv2.COLOR_YUV2RGB_NV21)
        img_preprocess, img_w, img_h = preprocess(img_rgb)

        # Run inference.
        output = gesture_model.infer([img_preprocess.flatten()])

        # Post-process into gesture boxes/classes and draw them on the image.
        bboxes = get_result(output, img_w, img_h)
        img_rgb = draw_boxes(img_rgb, bboxes)

        # Show the processed image on HDMI; must convert back to YUV NV21 first.
        output_yuv = hilens.cvt_color(img_rgb, hilens.RGB2YUV_NV21)
        display_hdmi.show(output_yuv)

        # Upload "OK"-gesture crops to OBS, rate-limited so OBS does not
        # accumulate too much data.
        if time.time() - last_upload_time > upload_duration:
            # Crop out the OK-gesture region, if any.
            img_OK = get_OK(img_rgb, bboxes)
            if img_OK is not None:
                # The crop (named by the current time) must be converted to
                # BGR and JPEG-encoded before uploading.
                img_OK = cv2.cvtColor(img_OK, cv2.COLOR_RGB2BGR)
                img_OK = cv2.imencode('.jpg', img_OK)[1]
                filename = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
                ret = hilens.upload_bufer(filename + "_OK.jpg", img_OK, "write")
                if ret != 0:
                    hilens.error("upload pic failed!")
                    # `break` instead of `return`: the original returned here
                    # and skipped hilens.terminate(), leaking the session.
                    break
                last_upload_time = time.time()

        # POST the whole processed frame.
        try:
            post_msg(skill_cfg['server_url'], img_rgb)
        except Exception as e:
            hilens.error("post data failed!")
            print("Reason : ", e)

    hilens.terminate()
def run():
    """Landmarks skill main loop: detect the largest face, draw 68 landmarks,
    show on HDMI, and record/upload a short video clip to OBS once."""
    # Initialize the framework; the verification value must match the one
    # entered when the skill was created.
    hilens.init("landmarks")

    # Built-in camera and HDMI display.
    camera = hilens.VideoCapture()
    display = hilens.Display(hilens.HDMI)

    # Models: face detection (centerface) and 68-point face landmarks.
    centerface_model_path = hilens.get_model_dir() + "centerface_template_model.om"
    centerface_model = hilens.Model(centerface_model_path)
    landmark_model_path = hilens.get_model_dir() + "landmark68_template_model.om"
    landmark_model = hilens.Model(landmark_model_path)

    # Video recording setup for the HiLens Kit camera feed.
    fps = 10
    size = (1280, 720)
    # Renamed from `format`, which shadowed the builtin.
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')  # note the container format
    writer = cv2.VideoWriter("face.avi", fourcc, fps, size)

    # Start/end frame indices of the clip to save; tune as needed.
    frame_count = 0
    frame_start = 100
    frame_end = 150
    uploaded = False

    while True:
        # Read one frame (YUV NV21 format).
        input_yuv = camera.read()

        # Preprocess: convert to RGB and resize to the model input size.
        img_rgb = cv2.cvtColor(input_yuv, cv2.COLOR_YUV2RGB_NV21)
        img_pre = preprocess(img_rgb)
        img_h, img_w = img_rgb.shape[:2]

        # Face-detection inference; post-process to the largest face box.
        output = centerface_model.infer([img_pre.flatten()])
        face_box = get_largest_face_box(output, img_h, img_w)

        # A face was detected and meets the size/condition checks.
        if face_box is not None:
            # Crop the face region and preprocess it for the landmark model.
            img_face = preprocess_landmark(img_rgb, face_box)
            # Landmark inference: 68 (x, y) face keypoints.
            output2 = landmark_model.infer([img_face.flatten()])
            landmarks = output2[0].reshape(68, 2)
            # Draw the face box and landmarks on the RGB image.
            img_rgb = draw_landmarks(img_rgb, face_box, landmarks)

        # Show on HDMI; must convert back to YUV NV21 first.
        output_nv21 = hilens.cvt_color(img_rgb, hilens.RGB2YUV_NV21)
        display.show(output_nv21)

        # Record one clip and send it to OBS (one-shot).
        if not uploaded:
            frame_count += 1
            if frame_count > frame_end:  # end of the clip
                uploaded = True
                writer.release()  # flush the local file first
                ret = hilens.upload_file("face.avi", "face.avi", "write")  # push to OBS
                if ret != 0:
                    hilens.error("upload file failed!")
                    # `break` instead of `return`: the original returned here
                    # and skipped hilens.terminate(), leaking the session.
                    break
            elif frame_count > frame_start:  # start of the clip
                # Frames written to the file must be BGR.
                writer.write(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR))

    hilens.terminate()