def run(self):
    """Worker loop for the squat ("Przysiady") exercise.

    Grabs webcam frames, runs tf-pose estimation, draws the skeleton and
    Polish-language form-feedback text onto each frame, emits the frame
    to the GUI via ``self.changePixmap`` and records it to
    'Wideo/Przysiady.avi'. Loops forever; the capture and writer are only
    released when the thread is killed (NOTE(review): consider a stop flag).
    """
    global height, width
    self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # FIX: property ids 3/4 are FRAME_WIDTH/FRAME_HEIGHT. The original code
    # requested width=480, height=640 (swapped), while the VideoWriter below
    # is created for 640x480 frames — a size mismatch breaks the recording.
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    self.cap.set(cv2.CAP_PROP_FPS, 7)  # property id 5 == CAP_PROP_FPS
    self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
    self.out = cv2.VideoWriter('Wideo/Przysiady.avi', self.fourcc, 3,
                               (640, 480))
    while True:
        ret, frame = self.cap.read()
        if ret:
            # w/h are module-level globals used to configure the estimator.
            allPerson = estimator.inference(
                frame, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)
            frame = TfPoseEstimator.draw_humans(frame, allPerson,
                                                imgcopy=False)
            height, width = frame.shape[0], frame.shape[1]
            if len(allPerson) > 0:
                # distances between keypoints (presumably 10/13 = ankles,
                # 9/12 = knees, 2/5 = shoulders — confirm against findPoint)
                foot_distance = int(
                    euclidianDistance(findPoint(allPerson, 10),
                                      findPoint(allPerson, 13)))
                knee_distance = int(
                    euclidianDistance(findPoint(allPerson, 9),
                                      findPoint(allPerson, 12)))
                shoulders_distance = int(
                    euclidianDistance(findPoint(allPerson, 2),
                                      findPoint(allPerson, 5)))
                # joint angles (each from a triple of keypoints)
                angleKneeR = angleCount(findPoint(allPerson, 11),
                                        findPoint(allPerson, 12),
                                        findPoint(allPerson, 13))
                angleKneeL = angleCount(findPoint(allPerson, 8),
                                        findPoint(allPerson, 9),
                                        findPoint(allPerson, 10))
                angleNeckR = angleCount(findPoint(allPerson, 8),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 0))
                angleNeckL = angleCount(findPoint(allPerson, 11),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 0))
                # form-feedback overlays (user-facing strings kept verbatim)
                if angleNeckR < 120:
                    cv2.putText(frame, "Patrz przed siebie!", (20, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if shoulders_distance <= 0.55 * foot_distance:
                    cv2.putText(frame, "Stopy za szeroko!", (20, 55),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if shoulders_distance >= 1.3 * foot_distance:
                    cv2.putText(frame, "Rozszerz stopy!", (20, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if knee_distance <= 0.7 * foot_distance:
                    cv2.putText(frame, "Kolana zbyt wasko!", (20, 105),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if knee_distance >= 1.35 * foot_distance:
                    cv2.putText(frame, "Kolana za szeroko!", (20, 130),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                # NOTE(review): the third clause compares foot vs knee the
                # other way round than the warning above (which tests
                # knee_distance >= 1.35 * foot_distance) — confirm intent.
                if squats(angleKneeR, angleKneeL, angleNeckR, angleNeckL) \
                        and (knee_distance >= 0.7 * foot_distance) \
                        and (foot_distance <= 1.35 * knee_distance) \
                        and (shoulders_distance >= 0.55 * foot_distance) \
                        and (shoulders_distance <= 1.3 * foot_distance):
                    # correct starting posture: highlight skeleton
                    frame = TfPoseEstimator.draw_humans2(frame, allPerson,
                                                         imgcopy=False)
                    if squatsDone(angleKneeR, angleKneeL):
                        frame = TfPoseEstimator.draw_humans3(frame, allPerson,
                                                             imgcopy=False)
                        if squatDoneScore(angleKneeR, angleKneeL):
                            # full-depth squat reached: prompt to come up
                            frame = TfPoseEstimator.draw_humans4(
                                frame, allPerson, imgcopy=False)
                            cv2.putText(frame, "Powrot do gory!", (20, 80),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        color=(255, 255, 255), thickness=2,
                                        lineType=cv2.LINE_8)
            else:
                cv2.putText(frame, "Brak uzytkownika na obrazie!", (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                            color=(255, 255, 255), thickness=2,
                            lineType=cv2.LINE_8)
            # forward the annotated frame to the GUI and to the recording
            qImg4 = QImage(frame.data, width, height, QImage.Format_BGR888)
            self.changePixmap.emit(qImg4)
            self.out.write(frame)
def run(self):
    """Worker loop for the push-up ("Pompki") exercise.

    Grabs webcam frames, runs tf-pose estimation, draws the skeleton and
    Polish-language form-feedback text onto each frame, emits the frame
    to the GUI via ``self.changePixmap`` and records it to
    'Wideo/Pompki.avi'. Loops forever; the capture and writer are only
    released when the thread is killed (NOTE(review): consider a stop flag).
    """
    global height, width
    self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # FIX: property ids 3/4 are FRAME_WIDTH/FRAME_HEIGHT. The original code
    # requested width=480, height=640 (swapped), while the VideoWriter below
    # is created for 640x480 frames — a size mismatch breaks the recording.
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    self.cap.set(cv2.CAP_PROP_FPS, 7)  # property id 5 == CAP_PROP_FPS
    self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
    self.out = cv2.VideoWriter('Wideo/Pompki.avi', self.fourcc, 3,
                               (640, 480))
    while True:
        ret, frame = self.cap.read()
        if ret:
            # w/h are module-level globals used to configure the estimator.
            allPerson = estimator.inference(
                frame, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)
            frame = TfPoseEstimator.draw_humans(frame, allPerson,
                                                imgcopy=False)
            height, width = frame.shape[0], frame.shape[1]
            if len(allPerson) > 0:
                # joint angles. NOTE(review): despite the names, keypoints
                # 2-7 are arm points (shoulder/elbow/wrist), so angleAnkleR/L
                # look like ELBOW angles — confirm against findPoint's map.
                angleAnkleR = angleCount(findPoint(allPerson, 4),
                                         findPoint(allPerson, 3),
                                         findPoint(allPerson, 2))
                angleAnkleL = angleCount(findPoint(allPerson, 7),
                                         findPoint(allPerson, 6),
                                         findPoint(allPerson, 5))
                angleKneeR = angleCount(findPoint(allPerson, 8),
                                        findPoint(allPerson, 9),
                                        findPoint(allPerson, 10))
                angleKneeL = angleCount(findPoint(allPerson, 11),
                                        findPoint(allPerson, 12),
                                        findPoint(allPerson, 13))
                angleHipR = angleCount(findPoint(allPerson, 1),
                                       findPoint(allPerson, 8),
                                       findPoint(allPerson, 9))
                angleHipL = angleCount(findPoint(allPerson, 1),
                                       findPoint(allPerson, 11),
                                       findPoint(allPerson, 12))
                angleNeckR = angleCount(findPoint(allPerson, 8),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 0))
                angleNeckL = angleCount(findPoint(allPerson, 11),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 0))
                # form-feedback overlays (user-facing strings kept verbatim)
                if angleKneeR < 145:
                    cv2.putText(frame, "Nie zginaj kolan!", (20, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if angleHipR < 145:
                    cv2.putText(frame, "Biodra nizej!", (20, 55),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if angleNeckR < 150:
                    cv2.putText(frame, "Patrz przed siebie!", (20, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if pushUps(angleAnkleR, angleAnkleL, angleKneeR, angleKneeL,
                           angleHipR, angleHipL, angleNeckR, angleNeckL):
                    # correct starting posture: highlight skeleton
                    frame = TfPoseEstimator.draw_humans2(frame, allPerson,
                                                         imgcopy=False)
                    if pushUpsDone(angleAnkleR, angleAnkleL):
                        frame = TfPoseEstimator.draw_humans3(frame, allPerson,
                                                             imgcopy=False)
                        if pushUpsDoneScore(angleAnkleR, angleAnkleL):
                            # full-depth push-up reached: prompt to come up
                            frame = TfPoseEstimator.draw_humans4(
                                frame, allPerson, imgcopy=False)
                            cv2.putText(frame, "Powrot do gory!", (20, 105),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        color=(255, 255, 255), thickness=2,
                                        lineType=cv2.LINE_8)
            else:
                cv2.putText(frame, "Brak uzytkownika na obrazie!", (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                            color=(255, 255, 255), thickness=2,
                            lineType=cv2.LINE_8)
            # forward the annotated frame to the GUI and to the recording
            qImg2 = QImage(frame.data, width, height, QImage.Format_BGR888)
            self.changePixmap.emit(qImg2)
            self.out.write(frame)
def run(self):
    """Worker loop for the plank exercise.

    Grabs webcam frames, runs tf-pose estimation, draws the skeleton and
    Polish-language form-feedback text onto each frame, emits the frame
    to the GUI via ``self.changePixmap`` and records it to
    'Wideo/Plank.avi'. Loops forever; the capture and writer are only
    released when the thread is killed (NOTE(review): consider a stop flag).
    """
    global height, width
    self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # FIX: property ids 3/4 are FRAME_WIDTH/FRAME_HEIGHT. The original code
    # requested width=480, height=640 (swapped), while the VideoWriter below
    # is created for 640x480 frames — a size mismatch breaks the recording.
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    self.cap.set(cv2.CAP_PROP_FPS, 7)  # property id 5 == CAP_PROP_FPS
    self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
    self.out = cv2.VideoWriter('Wideo/Plank.avi', self.fourcc, 3,
                               (640, 480))
    while True:
        ret, frame = self.cap.read()
        if ret:
            # w/h are module-level globals used to configure the estimator.
            allPerson = estimator.inference(
                frame, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)
            frame = TfPoseEstimator.draw_humans(frame, allPerson,
                                                imgcopy=False)
            height, width = frame.shape[0], frame.shape[1]
            if len(allPerson) > 0:
                # distances between keypoints. NOTE(review): points 3/6 are
                # elbows in the usual keypoint map, so "ankle_distance" is
                # presumably elbow spacing — confirm against findPoint.
                foot_distance = int(
                    euclidianDistance(findPoint(allPerson, 10),
                                      findPoint(allPerson, 13)))
                ankle_distance = int(
                    euclidianDistance(findPoint(allPerson, 3),
                                      findPoint(allPerson, 6)))
                # joint angles (angleAnkleR/L look like ELBOW angles — the
                # "Trzymaj kat prosty w lokciach!" message below agrees)
                angleAnkleR = angleCount(findPoint(allPerson, 7),
                                         findPoint(allPerson, 6),
                                         findPoint(allPerson, 5))
                angleAnkleL = angleCount(findPoint(allPerson, 4),
                                         findPoint(allPerson, 3),
                                         findPoint(allPerson, 2))
                angleKneeR = angleCount(findPoint(allPerson, 8),
                                        findPoint(allPerson, 9),
                                        findPoint(allPerson, 10))
                angleKneeL = angleCount(findPoint(allPerson, 11),
                                        findPoint(allPerson, 12),
                                        findPoint(allPerson, 13))
                angleHipR = angleCount(findPoint(allPerson, 1),
                                       findPoint(allPerson, 8),
                                       findPoint(allPerson, 9))
                angleHipL = angleCount(findPoint(allPerson, 1),
                                       findPoint(allPerson, 11),
                                       findPoint(allPerson, 12))
                angleNeckR = angleCount(findPoint(allPerson, 0),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 8))
                angleNeckL = angleCount(findPoint(allPerson, 0),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 11))
                # form-feedback overlays (user-facing strings kept verbatim)
                if angleAnkleR < 80:
                    cv2.putText(frame, "Trzymaj kat prosty w lokciach!",
                                (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if angleKneeR < 125:
                    cv2.putText(frame, "Nie zginaj kolan!", (20, 55),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if angleHipR < 115:
                    cv2.putText(frame, "Biodra nizej!", (20, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if angleNeckR < 130:
                    cv2.putText(frame, "Patrz przed siebie!", (20, 105),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if foot_distance > ankle_distance:
                    # FIX: originally drawn at (20, 105), overprinting the
                    # "Patrz przed siebie!" message. Row 130 is safe: this
                    # warning (foot > ankle) and "Tak trzymaj!" (requires
                    # foot < ankle) can never be shown on the same frame.
                    cv2.putText(frame, "Stopy za szeroko!", (20, 130),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if plank(angleAnkleR, angleAnkleL, angleKneeR, angleKneeL,
                         angleHipR, angleHipL, angleNeckR,
                         angleNeckL) and foot_distance < ankle_distance:
                    # correct plank posture: highlight skeleton
                    frame = TfPoseEstimator.draw_humans2(frame, allPerson,
                                                         imgcopy=False)
                    if plankDone(angleAnkleR, angleAnkleL):
                        frame = TfPoseEstimator.draw_humans3(frame, allPerson,
                                                             imgcopy=False)
                        cv2.putText(frame, "Tak trzymaj!", (20, 130),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    color=(255, 255, 255), thickness=2,
                                    lineType=cv2.LINE_8)
            else:
                cv2.putText(frame, "Brak uzytkownika na obrazie!", (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                            color=(255, 255, 255), thickness=2,
                            lineType=cv2.LINE_8)
            # forward the annotated frame to the GUI and to the recording
            qImg3 = QImage(frame.data, width, height, QImage.Format_BGR888)
            self.changePixmap.emit(qImg3)
            self.out.write(frame)
def run(self):
    """Worker loop for the lunge ("Wykroki") exercise.

    Grabs webcam frames, runs tf-pose estimation, draws the skeleton and
    Polish-language form-feedback text onto each frame, emits the frame
    to the GUI via ``self.changePixmap`` and records it to
    'Wideo/Wykroki.avi'. Loops forever; the capture and writer are only
    released when the thread is killed (NOTE(review): consider a stop flag).
    """
    # counter/start_time are declared global here but not referenced in
    # this method's visible body — presumably used by the helpers below.
    global counter, start_time
    global height, width
    self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # FIX: property ids 3/4 are FRAME_WIDTH/FRAME_HEIGHT. The original code
    # requested width=480, height=640 (swapped), while the VideoWriter below
    # is created for 640x480 frames — a size mismatch breaks the recording.
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    self.cap.set(cv2.CAP_PROP_FPS, 7)  # property id 5 == CAP_PROP_FPS
    self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
    self.out = cv2.VideoWriter('Wideo/Wykroki.avi', self.fourcc, 3,
                               (640, 480))
    while True:
        ret, frame = self.cap.read()
        if ret:
            # w/h are module-level globals used to configure the estimator.
            allPerson = estimator.inference(
                frame, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)
            frame = TfPoseEstimator.draw_humans(frame, allPerson,
                                                imgcopy=False)
            height, width = frame.shape[0], frame.shape[1]
            if len(allPerson) > 0:
                # joint angles
                angleKneeR = angleCount(findPoint(allPerson, 10),
                                        findPoint(allPerson, 9),
                                        findPoint(allPerson, 8))
                angleKneeL = angleCount(findPoint(allPerson, 13),
                                        findPoint(allPerson, 12),
                                        findPoint(allPerson, 11))
                angleNeckR = angleCount(findPoint(allPerson, 8),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 0))
                angleNeckL = angleCount(findPoint(allPerson, 11),
                                        findPoint(allPerson, 1),
                                        findPoint(allPerson, 0))
                # wrist-to-hip and wrist-to-nose distances (arms-at-sides test)
                wrist_hipR = int(
                    euclidianDistance(findPoint(allPerson, 4),
                                      findPoint(allPerson, 8)))
                wrist_hipL = int(
                    euclidianDistance(findPoint(allPerson, 7),
                                      findPoint(allPerson, 11)))
                wrist_noseR = int(
                    euclidianDistance(findPoint(allPerson, 4),
                                      findPoint(allPerson, 0)))
                wrist_noseL = int(
                    euclidianDistance(findPoint(allPerson, 7),
                                      findPoint(allPerson, 0)))
                # form-feedback overlays (user-facing strings kept verbatim)
                if angleKneeR < 80:
                    cv2.putText(frame, "Zachowaj kat prosty w kolanie!",
                                (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                # NOTE(review): asymmetric test (R uses '< 110', L uses
                # '> 150') — looks like one comparison may be inverted;
                # confirm against the squat/push-up variants which use '<'.
                if angleNeckR < 110 or angleNeckL > 150:
                    cv2.putText(frame, "Patrz przed siebie!", (20, 55),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                if wrist_hipR > wrist_noseR:
                    cv2.putText(frame, "Trzymaj rece przy ciele!", (20, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                color=(255, 255, 255), thickness=2,
                                lineType=cv2.LINE_8)
                # NOTE(review): 'or' lets one raised arm pass while the
                # warning above only checks the right side — confirm intent.
                if lungesStart(angleKneeR, angleKneeL, angleNeckR,
                               angleNeckL) and (wrist_hipL < wrist_noseL
                                                or wrist_hipR < wrist_noseR):
                    # correct starting posture: highlight skeleton
                    frame = TfPoseEstimator.draw_humans2(frame, allPerson,
                                                         imgcopy=False)
                    if lungesDone(angleKneeR, angleKneeL):
                        frame = TfPoseEstimator.draw_humans3(frame, allPerson,
                                                             imgcopy=False)
                        if lungesDoneScore(angleKneeR, angleKneeL):
                            # full-depth lunge reached: prompt to come up
                            frame = TfPoseEstimator.draw_humans4(
                                frame, allPerson, imgcopy=False)
                            cv2.putText(frame, "Powrot do gory!", (20, 105),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        color=(255, 255, 255), thickness=2,
                                        lineType=cv2.LINE_8)
            else:
                cv2.putText(frame, "Brak uzytkownika na obrazie!", (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                            color=(255, 255, 255), thickness=2,
                            lineType=cv2.LINE_8)
            # forward the annotated frame to the GUI and to the recording
            qImg1 = QImage(frame.data, width, height, QImage.Format_BGR888)
            self.changePixmap.emit(qImg1)
            self.out.write(frame)