def load_pretrain_model(model):
    # Paths to the pretrained pose models
    dyn_graph_path = {
        'VGG_origin': str(file_path / "Pose/graph_models/VGG_origin/graph_opt.pb"),
        'mobilenet_thin': str(file_path / "Pose/graph_models/mobilenet_thin/graph_opt.pb")
    }
    graph_path = dyn_graph_path[model]
    if not os.path.isfile(graph_path):
        raise Exception('Graph file doesn\'t exist, path=%s' % graph_path)
    return TfPoseVisualizer(graph_path, target_size=(input_width, input_height))
def load_pretrain_model(model):
    dyn_graph_path = {
        'VGG_origin': str(file_path / "Pose/graph_models/VGG_origin/graph_opt.pb"),
    }
    # print(dyn_graph_path.get('VGG_origin'))
    # C:\Users\haram\PycharmProjects\OpenBankProject\Pose\graph_models\VGG_origin\graph_opt.pb
    graph_path = dyn_graph_path[model]
    if not os.path.isfile(graph_path):
        raise Exception('Graph file doesn\'t exist, path=%s' % graph_path)
    return TfPoseVisualizer(graph_path, target_size=(input_width, input_height))
def load_pretrain_model(model):
    dyn_graph_path = {
        'VGG_origin': str(file_path / "Pose/graph_models/VGG_origin/graph_opt.pb"),  # trained pose model
        'mobilenet_thin': str(file_path / "Pose/graph_models/mobilenet_thin/graph_opt_mobile.pb")
    }
    graph_path = dyn_graph_path[model]
    if not os.path.isfile(graph_path):
        raise Exception('Graph file doesn\'t exist, path=%s' % graph_path)
    # Input: the pose model file and the target input size, e.g. (656, 368); adjust to your own setup
    return TfPoseVisualizer(graph_path, target_size=(input_width, input_height))
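# --- Usage sketch (not from the repo): one way the module-level names this loader
# relies on might be defined. `file_path` pointing at the project root and the
# (656, 368) network input size are assumptions; adjust both to your own setup.
from pathlib import Path

file_path = Path(__file__).parent          # assumed: directory containing Pose/
input_width, input_height = 656, 368       # assumed tf-pose-style input size

estimator = load_pretrain_model('VGG_origin')  # or 'mobilenet_thin'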
def show_camera(self):
    start = time.time()
    ret, frame = self.cap.read()
    if ret:
        show = frame  # fall back to the raw frame if no mode below annotates it
        if self.__flag_mode == 1:
            self.infoBox.setText(u'Current mode: human pose estimation')
            humans = estimator.inference(frame)  # returns heatMat / pafMat
            # get pose info: returns frame, joints, bboxes, xcenter
            pose = TfPoseVisualizer.draw_pose_rgb(frame, humans)  # skeleton drawn onto the frame
            # recognize the action framewise
            show = framewise_recognize(pose, action_classifier)  # returns the annotated frame

        # elif self.__flag_mode == 2:
        #     self.infoBox.setText(u'Current mode: multi-person tracking')
        #     humans = poseEstimator.inference(show)
        #     show, joints, bboxes, xcenter, sk = TfPoseEstimator.get_skeleton(show, humans, imgcopy=False)
        #     height = show.shape[0]
        #     width = show.shape[1]
        #     if bboxes:
        #         result = np.array(bboxes)
        #         det = result[:, 0:5]
        #         det[:, 0] = det[:, 0] * width
        #         det[:, 1] = det[:, 1] * height
        #         det[:, 2] = det[:, 2] * width
        #         det[:, 3] = det[:, 3] * height
        #         trackers = self.tracker.update(det)
        #
        #         for d in trackers:
        #             xmin = int(d[0])
        #             ymin = int(d[1])
        #             xmax = int(d[2])
        #             ymax = int(d[3])
        #             label = int(d[4])
        #             cv2.rectangle(show, (xmin, ymin), (xmax, ymax),
        #                           (int(settings.c[label % 32, 0]),
        #                            int(settings.c[label % 32, 1]),
        #                            int(settings.c[label % 32, 2])), 4)
        #
        # elif self.__flag_mode == 3:
        #     self.infoBox.setText(u'Current mode: human action recognition')
        #     humans = poseEstimator.inference(show)
        #     ori = np.copy(show)
        #     show, joints, bboxes, xcenter, sk = TfPoseEstimator.get_skeleton(show, humans, imgcopy=False)
        #     height = show.shape[0]
        #     width = show.shape[1]
        #     if bboxes:
        #         result = np.array(bboxes)
        #         det = result[:, 0:5]
        #         det[:, 0] = det[:, 0] * width
        #         det[:, 1] = det[:, 1] * height
        #         det[:, 2] = det[:, 2] * width
        #         det[:, 3] = det[:, 3] * height
        #         trackers = self.tracker.update(det)
        #         self.current = [i[-1] for i in trackers]
        #
        #         if len(self.previous) > 0:
        #             for item in self.previous:
        #                 if item not in self.current and item in self.data:
        #                     del self.data[item]
        #                 if item not in self.current and item in self.memory:
        #                     del self.memory[item]
        #
        #         self.previous = self.current
        #         for d in trackers:
        #             xmin = int(d[0])
        #             ymin = int(d[1])
        #             xmax = int(d[2])
        #             ymax = int(d[3])
        #             label = int(d[4])
        #             try:
        #                 j = np.argmin(np.array([abs(i - (xmax + xmin) / 2.) for i in xcenter]))
        #             except:
        #                 j = 0
        #             if joint_filter(joints[j]):
        #                 joints[j] = joint_completion(joint_completion(joints[j]))
        #                 if label not in self.data:
        #                     self.data[label] = [joints[j]]
        #                     self.memory[label] = 0
        #                 else:
        #                     self.data[label].append(joints[j])
        #
        #                 if len(self.data[label]) == settings.L:
        #                     pred = actionPredictor().move_status(self.data[label])
        #                     if pred == 0:
        #                         pred = self.memory[label]
        #                     else:
        #                         self.memory[label] = pred
        #                     self.data[label].pop(0)
        #
        #                 location = self.data[label][-1][1]
        #                 if location[0] <= 30:
        #                     location = (51, location[1])
        #                 if location[1] <= 10:
        #                     location = (location[0], 31)
        #
        #                 cv2.putText(show, settings.move_status[pred], (location[0] - 30, location[1] - 10),
        #                             cv2.FONT_HERSHEY_SIMPLEX, 0.8,
        #                             (0, 255, 0), 2)
        #
        #             cv2.rectangle(show, (xmin, ymin), (xmax, ymax),
        #                           (int(settings.c[label % 32, 0]),
        #                            int(settings.c[label % 32, 1]),
        #                            int(settings.c[label % 32, 2])), 4)
        # show = cv.resize(frame, (settings.winWidth, settings.winHeight))

        show = cv.cvtColor(show, cv.COLOR_BGR2RGB)
        end = time.time()
        self.fps = 1. / (end - start)
        cv.putText(show, 'FPS: %.2f' % self.fps, (30, 30),
                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
        self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))
    else:
        self.cap.release()
        self.timer_camera.stop()
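# --- Wiring sketch (assumption, not from the repo): show_camera reads like a
# QTimer slot, so something like the following would drive it; the 30 ms interval
# and the widget layout are guesses.
import cv2 as cv
from PyQt5 import QtCore, QtWidgets

class CameraWindow(QtWidgets.QWidget):
    # show_camera (above) would be a method of this class
    def __init__(self):
        super().__init__()
        self.label_show_camera = QtWidgets.QLabel(self)   # target widget for the frames
        self.cap = cv.VideoCapture(0)                     # default webcam
        self.timer_camera = QtCore.QTimer(self)
        self.timer_camera.timeout.connect(self.show_camera)
        self.timer_camera.start(30)                       # call show_camera every ~30 ms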
]
writer.writerow(f_headers)

if not args.type:
    print('Please specify the action type to collect :-<')
else:
    while cv.waitKey(1) < 0:
        has_frame, show = cap.read()
        if has_frame:
            fps_count += 1
            frame_count += 1

            # pose estimation
            humans = estimator.inference(show)
            # get pose info: returns frame, joints, bboxes, xcenter
            pose = TfPoseVisualizer.draw_pose_rgb(show, humans)
            # recognize the action framewise
            show = framewise_recognize(pose, action_classifier)

            height, width = show.shape[:2]
            # show the realtime FPS
            if (time.time() - start_time) > fps_interval:
                # frames counted during this interval; with a 1-second interval this is the FPS
                realtime_fps = fps_count / (time.time() - start_time)
                fps_count = 0  # reset the frame counter
                start_time = time.time()
            fps_label = 'FPS:{0:.2f}'.format(realtime_fps)
            cv.putText(show, fps_label, (width - 160, 25), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)

            # show the number of detected people
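# --- Sketch (assumption): the `]` at the top of the snippet above closes the
# f_headers list. One plausible header layout for the 18 COCO keypoints that
# tf-pose emits -- the column names are illustrative, not the repo's actual schema.
import csv

joint_names = ['nose', 'neck', 'r_shoulder', 'r_elbow', 'r_wrist',
               'l_shoulder', 'l_elbow', 'l_wrist', 'r_hip', 'r_knee',
               'r_ankle', 'l_hip', 'l_knee', 'l_ankle', 'r_eye',
               'l_eye', 'r_ear', 'l_ear']
f_headers = ['frame'] + ['%s_%s' % (j, axis) for j in joint_names for axis in ('x', 'y')]

with open('origin_data.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(f_headers)   # one (x, y) column pair per joint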
                              round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))

# # txt file for saving joint data, used during training
# f = open('origin_data.txt', 'a+')

# Pipeline: extract keypoints with OpenPose, label them to train a classifier,
# then feed the OpenPose output to that classifier.
while cap.isOpened():  # main loop
    has_frame, show = cap.read()  # frame by frame
    if has_frame:
        fps_count += 1
        frame_count += 1

        # Crop the image, then feed it into the network.
        # TODO: shrinking the detection region clearly hurts accuracy; changing what is
        # displayed would have to be done after the nodes are generated.
        # pose estimation: body_parts, PartPair, uidx_list
        humans = estimator.inference(show)  # returns heatMat / pafMat
        # get pose info: returns frame, joints, bboxes, xcenter
        pose = TfPoseVisualizer.draw_pose_rgb(show, humans)  # skeleton drawn onto the frame
        # save a picture: pose[0] is the frame with the joints drawn on it
        # cv.imwrite('test_out/' + str(frame_count) + '.jpg', pose[0])
        # recognize the action framewise
        show = framewise_recognize(pose, action_classifier)  # returns the annotated frame
        # end of pose, tracking, and detection

        height, width = show.shape[:2]
        # show the realtime FPS
        if (time.time() - start_time) > fps_interval:
            # frames counted during this interval; with a 1-second interval this is the FPS
            realtime_fps = fps_count / (time.time() - start_time)
            fps_count = 0  # reset the frame counter
            start_time = time.time()
        fps_label = 'FPS:{0:.2f}'.format(realtime_fps)
        cv.putText(show, fps_label, (width - 160, 25), cv.FONT_HERSHEY_SIMPLEX,
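# --- Sketch (assumption): the dangling `round(cap.get(...))` fragment at the top
# of the loop above is the tail of a video-writer setup; a typical shape follows.
# The output filename and mp4v codec are guesses.
import cv2 as cv

cap = cv.VideoCapture(0)
video_writer = cv.VideoWriter('record.mp4',
                              cv.VideoWriter_fourcc(*'mp4v'),
                              30,  # output FPS
                              (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
                               round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))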
# read the frame
frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_detection.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5,
                                        minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)

canvas = np.zeros((250, 300, 3), dtype="uint8")
frameClone = frame.copy()

# pose estimation and framewise action recognition
humans = estimator.inference(frameClone)
pose = TfPoseVisualizer.draw_pose_rgb(frameClone, humans)
frameClone = framewise_recognize(pose, action_classifier)
height, width = frameClone.shape[:2]

if len(faces) > 0:
    # keep the largest detected face
    faces = sorted(faces, reverse=True, key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
    (fX, fY, fW, fH) = faces
    # Extract the ROI of the face from the grayscale image, resize it to a fixed
    # 48x48 pixels, and then prepare the ROI for classification via the CNN
    roi = gray[fY:fY + fH, fX:fX + fW]
    roi = cv2.resize(roi, (48, 48))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)
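    # --- Sketch (assumption): the usual continuation once the ROI is batched is to
    # run the emotion CNN and annotate the face. `emotion_classifier` (a Keras model)
    # and the EMOTIONS label list are assumed names, not confirmed by the source.
    preds = emotion_classifier.predict(roi)[0]             # per-class probabilities
    label = EMOTIONS[preds.argmax()]                       # most likely emotion
    cv2.putText(frameClone, label, (fX, fY - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)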