class Thread(QThread):
    """Capture worker: reads camera frames, feeds them to the gesture
    recognizer and pushes results to the Qt UI via signals.

    Recording is toggled manually with the 'S' key (see ``keyPress``).
    """

    changePixmap = pyqtSignal(QPixmap)
    changePicture = pyqtSignal(QPixmap)
    changeProcessbar = pyqtSignal(tuple)
    changeText = pyqtSignal(str)
    listenkey = pyqtSignal(str)
    changeBorder = pyqtSignal(int)
    changeBorderToNormal = pyqtSignal()

    # Categories that never get a progress bar of their own.
    _SKIP_CATEGORIES = ("No gesture", "Doing other things")

    def __init__(self):
        super(Thread, self).__init__()
        self._args = parser.parse_args()
        # Keyboard listener: the UI emits listenkey, handled in keyPress.
        self.listenkey.connect(self.keyPress)
        # Camera handle.
        self._cap_video = cv2.VideoCapture(self._args.device)
        # Set once the background initialisation below has finished.
        self._event = Event()
        # Queue of recognition results waiting to be drawn.
        self._queue_draw = queue.Queue()
        # Worker pool for uploads / background jobs.
        self._pool = ThreadPoolExecutor(max_workers=POOL_SIZE)
        # NOTE: the list must exist *before* the background task is
        # submitted — the task appends to it (fixes a startup race).
        self._frame_distance = list()
        # Measure camera FPS in the background to derive the upload
        # frame-sampling distance.
        self._pool.submit(self._get_upload_distance, self._event)
        # Single-element list so GestureRec can mutate the flag in place.
        self._is_upload = [False]
        # Truthy while the capture loop should keep processing frames.
        self.flag = True

    def run(self):
        """Main capture/recognition loop; runs until the camera closes."""
        # Do not start recognising until initialisation has completed.
        if not self._event.is_set():
            self.changeText.emit("The system is initializing.")
            self._event.wait()
        # Recognition engine (needs the measured frame distance).
        self._gesture_rec = GestureRec(server_address=self._args.server_address,
                                       upload_size=UPLOAD_SIZE,
                                       frame_distance=self._frame_distance[0],
                                       queue_draw=self._queue_draw,
                                       pool=self._pool)
        frame_total = 0
        upload_total = 0
        if not self._cap_video.isOpened():
            self._cap_video = cv2.VideoCapture(self._args.device)
            self.changeText.emit("The system has been restarted.")
        while self._cap_video.isOpened():
            if self.flag:
                ret, frame = self._cap_video.read()
                frame_total += 1
                if ret:
                    upload_total = self._gesture_rec.check_upload(
                        frame_total, upload_total, frame, self._is_upload)
                    category_dict = self._gesture_rec.check_draw_text()
                    if category_dict is not None:
                        print(category_dict)
                        # Random filler values for the unscored bars.
                        divided = self._processbar_generator(category_dict)
                        divided_index = 0
                        # Non-prediction result: just show the message text.
                        if divided is None:
                            self.changeText.emit(list(category_dict)[0])
                            continue
                        show_category = max(category_dict, key=category_dict.get)
                        self.changeText.emit(show_category)
                        if show_category in self._SKIP_CATEGORIES:
                            # Reset every real category's bar to zero.
                            for category in lists:
                                if category in self._SKIP_CATEGORIES:
                                    continue
                                self.changeProcessbar.emit((lists.index(category), 0))
                            continue
                        else:
                            # Highlight the predicted category's picture.
                            self.changeBorder.emit(lists.index(show_category))
                            # Update all 12 progress bars at once.
                            for category in lists:
                                if category in self._SKIP_CATEGORIES:
                                    continue
                                if category in category_dict:
                                    self.changeProcessbar.emit(
                                        (lists.index(category),
                                         category_dict[category]))
                                else:
                                    try:
                                        self.changeProcessbar.emit(
                                            (lists.index(category),
                                             divided[divided_index]))
                                        divided_index += 1
                                    except Exception as e:
                                        # Filler values exhausted; log and
                                        # keep the loop alive.
                                        print(e)
                    self._show_frame(frame)
            else:
                # stop() was called: reset state and blank the display.
                self._is_upload = [False]
                self.flag = True
                self._show_picture("resource/white.jpg")
                self.changeText.emit("Stop")
        self._cap_video.release()

    def _processbar_generator(self, category_dict):
        """Generate random filler values for the progress bars that did
        not receive a score, so all bars together sum to 100.

        Algorithm ("stars and bars"):
        1. Build an array of N+1 items, first 0, last ``total``.
        2. Pick N-1 distinct integers from [0, total) and sort them.
        3. Differences of consecutive items are the generated values.

        Returns None when the dict is not a percentage breakdown
        (its values sum above 100, i.e. a non-prediction message).
        """
        total = 100 - sum(category_dict.values())
        if total > 100:
            return None
        nums = 12 - len(category_dict)
        if "No gesture" in category_dict:
            nums += 1
        # Nothing left to fill — avoids ValueError from random.sample
        # / randint below when every category already has a score.
        if nums <= 0:
            return []
        # Less value than slots: dump the remainder on one random bar.
        if total < nums:
            res = [0] * nums
            res[random.randint(0, nums - 1)] += total
            return res
        stick = [0] + random.sample(range(0, total), nums - 1) + [total]
        stick.sort()
        return [b - a for a, b in zip(stick, stick[1:])]

    def keyPress(self, key):
        """Toggle recording on 'S': first press starts uploading frames,
        the second ends the action and triggers prediction."""
        if key == "S":
            if self._is_upload[0]:
                # Action finished — run the prediction.
                self._is_upload[0] = False
                self._gesture_rec.end_action()
            else:
                # Action started — begin uploading frames.
                self.changeBorderToNormal.emit()
                self.changeText.emit("Start")
                self._is_upload[0] = True
                self._gesture_rec.start_action()

    def _show_frame(self, frame):
        """Render a BGR camera frame onto the Qt video widget."""
        try:
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = QImage(rgb_image.data, rgb_image.shape[1],
                           rgb_image.shape[0], QImage.Format_RGB888)
            self.changePixmap.emit(QPixmap.fromImage(image))
        except Exception:
            # Best effort: a bad frame is simply not displayed.
            pass

    def _show_picture(self, picture):
        """Load an image file and render it onto the Qt picture widget."""
        try:
            img = cv2.imread(picture)
            rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            image = QImage(rgb_image.data, rgb_image.shape[1],
                           rgb_image.shape[0], QImage.Format_RGB888)
            self.changePicture.emit(QPixmap.fromImage(image))
        except Exception:
            # Best effort: a missing/bad picture is simply not displayed.
            pass

    def _get_upload_distance(self, event):
        """Measure the camera FPS over a burst of frames, derive the
        frame-sampling distance (~5 saved frames per second), then mark
        initialisation complete via ``event``."""
        # Number of frames to capture for the measurement.
        num_frames = 120
        print("Initialization...")
        print("Capturing {0} frames".format(num_frames))
        start = time.time()
        for _ in range(num_frames):
            self._cap_video.read()
        seconds = time.time() - start
        print("Time taken : {0} seconds".format(seconds))
        fps = num_frames / seconds
        # Keep one frame out of every `distance` frames.
        distance = math.ceil(fps / 5)
        print("Estimated frames per second : {0}".format(fps))
        print("Save distance: %s" % distance)
        self._frame_distance.append(distance)
        self.changeText.emit("System initialization is complete.")
        event.set()

    def stop(self):
        """Request the capture loop to reset (observed in run())."""
        self.flag = 0
class Thread(QThread):
    """Capture worker: reads camera frames, detects motion with a
    background-subtraction mask (KNN or MOG2) and uses the motion state
    to start/stop gesture recording automatically — no keyboard needed.
    Results are pushed to the Qt UI via signals.
    """

    changePixmap = pyqtSignal(QPixmap)
    changeOpencv = pyqtSignal(QPixmap)
    changePicture = pyqtSignal(QPixmap)
    changeProcessbar = pyqtSignal(tuple)
    changeText = pyqtSignal(str)
    changeBorder = pyqtSignal(int)
    changeBorderToNormal = pyqtSignal()

    def __init__(self):
        super(Thread, self).__init__()
        self._args = parser.parse_args()
        # Camera handle.
        self._cap_video = cv2.VideoCapture(self._args.device)
        # Queue of recognition results waiting to be drawn.
        self._queue_draw = queue.Queue()
        # Set once the background initialisation below has finished.
        self._event = Event()
        # Worker pool for uploads / background jobs.
        self._pool = ThreadPoolExecutor(max_workers=POOL_SIZE)
        # Populated by the background FPS measurement task.
        self._frame_distance = list()
        self._pool.submit(self._get_upload_distance, self._event)
        # Selected frame-difference method and its trigger threshold.
        self._method = self._args.method
        self._threshold = self._args.threshold
        if self._method == "knn":
            self._detector = KNNMotionDetector()
            self._es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        else:
            self._detector = Mog2MotionDetector()
        self._mask_analyser = BinaryMaskAnalyser()
        # True until the first motion-free frame after (re)start.
        self._new = True
        # Truthy while the capture loop should keep processing frames.
        self.flag = True
        # Three-state upload flag in a one-element list so GestureRec can
        # mutate it in place: None = idle, True = uploading, False = done.
        self._is_upload = [None]

    def run(self):
        """Main capture/recognition loop; runs until the camera closes."""
        # Do not start recognising until initialisation has completed.
        if not self._event.is_set():
            self.changeText.emit("系统正在初始化,请稍等...")
            self._event.wait()
        # Recognition engine (needs the measured frame distance).
        self._gesture_rec = GestureRec(
            server_address=self._args.server_address,
            upload_size=UPLOAD_SIZE,
            frame_distance=self._frame_distance[0],
            queue_draw=self._queue_draw,
            pool=self._pool)
        frame_total = 0
        upload_total = 0
        if not self._cap_video.isOpened():
            self._cap_video = cv2.VideoCapture(self._args.device)
            self.changeText.emit("系统已重启")
        while self._cap_video.isOpened():
            if self.flag:
                ret, frame = self._cap_video.read()
                frame_total += 1
                # Only process successfully-read frames: computing the
                # mask from a failed read (frame is None) would crash
                # the worker thread.
                if ret:
                    if self._method == "knn":
                        mask = self._detector.returnMask(frame)
                    else:
                        mask = self._detector.returnGreyscaleMask(frame)
                        mask = cv2.merge([mask, mask, mask])
                    upload_total = self._gesture_rec.check_upload(
                        frame_total, upload_total, frame, self._is_upload)
                    category_dict = self._gesture_rec.check_draw_text()
                    if category_dict is not None:
                        print(category_dict)
                        # Random filler values for the unscored bars.
                        divided = self._processbar_generator(category_dict)
                        divided_index = 0
                        # Non-prediction result: just show the message text.
                        if divided is None:
                            self.changeText.emit(list(category_dict)[0])
                            continue
                        show_category = max(category_dict,
                                            key=category_dict.get)
                        self.changeText.emit(show_category)
                        if show_category == "非手势动作":
                            # Reset every real category's bar to zero.
                            for category in lists:
                                if category == "非手势动作":
                                    continue
                                self.changeProcessbar.emit(
                                    (lists.index(category), 0))
                            continue
                        else:
                            # Highlight the predicted category's picture.
                            self.changeBorder.emit(lists.index(show_category))
                            # Update all 12 progress bars at once.
                            for category in lists:
                                if category == "非手势动作":
                                    continue
                                if category in category_dict:
                                    self.changeProcessbar.emit(
                                        (lists.index(category),
                                         category_dict[category]))
                                else:
                                    self.changeProcessbar.emit(
                                        (lists.index(category),
                                         divided[divided_index]))
                                    divided_index += 1
                    # Motion state: `found` = a big-enough moving blob;
                    # `exception` = blob almost fills the frame (likely a
                    # lighting change / false positive).
                    found = False
                    exception = False
                    if self._mask_analyser.returnNumberOfContours(mask) > 0:
                        x, y, w, h = self._mask_analyser.returnMaxAreaRectangle(
                            mask)
                        if w + h > self._threshold:
                            found = True
                            if w + h > frame.shape[0] + frame.shape[1] - 10:
                                exception = True
                    if not self._new and self._is_upload[0] is None and (
                            found and not exception):
                        # Motion appeared: start uploading frames.
                        self._is_upload[0] = True
                        self.changeBorderToNormal.emit()
                        self.changeText.emit("请开始做动作")
                        self._gesture_rec.start_action()
                    if not self._new and self._is_upload[0] and (not found
                                                                 or exception):
                        # Motion stopped: end the action and predict.
                        self._is_upload[0] = False
                        self._gesture_rec.end_action(is_upload=self._is_upload)
                    if self._new and not found:
                        # First still frame after (re)start: arm detection.
                        self._new = False
                    self._show_opencv(mask)
                    self._show_frame(frame)
            else:
                # stop() was called: reset state and blank the display.
                self._new = True
                self.flag = True
                self._is_upload = [None]
                self._show_picture("resource/white.jpg")
                self.changeText.emit("系统停止")
        self._cap_video.release()

    def _processbar_generator(self, category_dict):
        """Generate random filler values for the progress bars that did
        not receive a score, so all bars together sum to 100.

        Algorithm ("stars and bars"):
        1. Build an array of N+1 items, first 0, last ``total``.
        2. Pick N-1 distinct integers from [0, total) and sort them.
        3. Differences of consecutive items are the generated values.

        Returns None when the dict is not a percentage breakdown
        (its values sum above 100, i.e. a non-prediction message).
        """
        total = 100 - sum(category_dict.values())
        if total > 100:
            return None
        nums = 12 - len(category_dict)
        if "非手势动作" in category_dict:
            nums += 1
        # Nothing left to fill — avoids ValueError from random.sample
        # / randint below when every category already has a score.
        if nums <= 0:
            return []
        # Less value than slots: dump the remainder on one random bar.
        if total < nums:
            res = [0] * nums
            res[random.randint(0, nums - 1)] += total
            return res
        stick = [0] + random.sample(range(0, total), nums - 1) + [total]
        stick.sort()
        return [b - a for a, b in zip(stick, stick[1:])]

    def _show_frame(self, frame):
        """Render a BGR camera frame onto the Qt video widget."""
        try:
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = QImage(rgb_image.data, rgb_image.shape[1],
                           rgb_image.shape[0], QImage.Format_RGB888)
            self.changePixmap.emit(QPixmap.fromImage(image))
        except Exception:
            # Best effort: a bad frame is simply not displayed.
            pass

    def _show_opencv(self, frame):
        """Render the (greyscale) motion mask onto the Qt debug widget."""
        image = QImage(frame.data, frame.shape[1], frame.shape[0],
                       QImage.Format_Indexed8)
        self.changeOpencv.emit(QPixmap.fromImage(image))

    def _show_picture(self, picture):
        """Load an image file and render it onto the Qt picture widget."""
        try:
            img = cv2.imread(picture)
            rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            image = QImage(rgb_image.data, rgb_image.shape[1],
                           rgb_image.shape[0], QImage.Format_RGB888)
            self.changePicture.emit(QPixmap.fromImage(image))
        except Exception:
            # Best effort: a missing/bad picture is simply not displayed.
            pass

    def _get_upload_distance(self, event):
        """Measure the camera FPS over a burst of frames, derive the
        frame-sampling distance (~5 saved frames per second), then mark
        initialisation complete via ``event``."""
        # Number of frames to capture for the measurement.
        num_frames = 120
        print("系统初始化...")
        print("Capturing {0} frames".format(num_frames))
        start = time.time()
        for _ in range(num_frames):
            self._cap_video.read()
        seconds = time.time() - start
        print("Time taken : {0} seconds".format(seconds))
        fps = num_frames / seconds
        # Keep one frame out of every `distance` frames.
        distance = math.ceil(fps / 5)
        print("Estimated frames per second : {0}".format(fps))
        print("Save distance: %s" % distance)
        self._frame_distance.append(distance)
        self.changeText.emit("系统初始化完毕")
        event.set()

    def stop(self):
        """Request the capture loop to reset (observed in run())."""
        self.flag = 0