Example #1
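This example classifies actions from detected 2D skeletons and overlays the results on each frame; the show_camera method below handles the live-camera modes (pose estimation, multi-person tracking, and sign-language recognition).
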
            # Classify the action from the current skeleton, or fall back to the ground truth
            if DO_INFERENCE:
                predicted_label = classifier.predict(skeleton)
                print("Predicted label:", predicted_label)
            else:
                predicted_label = action_type
                print("Ground-truth label:", predicted_label)

            if True:  # Visualization
                # Draw the detected skeletons (only once, on the first iteration)
                if ith_skel == 0:
                    my_detector.draw(image_disp, humans)

                # Draw bounding box and action label for this skeleton
                drawActionResult(image_disp, skeleton, predicted_label)

        # Write result to txt/png
        if SAVE_RESULTANT_SKELETON_TO_TXT_AND_IMAGE:
            myio.save_skeletons(SAVE_DETECTED_SKELETON_TO
                + int2str(ith_img, 5) + ".txt", skelsInfo)
            cv2.imwrite(SAVE_DETECTED_SKELETON_IMAGES_TO
                + int2str(ith_img, 5) + ".png", image_disp)

        if True:  # Display the annotated frame (scaled up 1.5x); press 'q' to quit
            cv2.imshow("action_recognition",
                cv2.resize(image_disp, (0, 0), fx=1.5, fy=1.5))
            q = cv2.waitKey(1)
            if q != -1 and chr(q) == 'q':
                break
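
        # NOTE (assumption, not shown in this excerpt): the fragment above is the body of a
        # per-skeleton loop nested inside a per-image loop; the surrounding structure is
        # roughly the following (image_loader and image are hypothetical names, the calls
        # themselves are the ones also used in show_camera below):
        #
        #   for ith_img, image in enumerate(image_loader):
        #       image_disp = image.copy()
        #       humans = my_detector.detect(image)
        #       skelsInfo = SkeletonDetector.humans_to_skelsInfo(humans)
        #       for ith_skel in range(len(skelsInfo)):
        #           skeleton = SkeletonDetector.get_ith_skeleton(skelsInfo, ith_skel)
        #           ...  # classification, drawing, saving, display as shown above
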
    def show_camera(self):
        start = time.time()
        ret, frame = self.cap.read()
        if ret:
            # Convert the OpenCV BGR frame to RGB for Qt display
            # show = cv2.resize(frame, (settings.winWidth, settings.winHeight))
            show = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if self.__flag_mode == 1:
                self.infoBox.setText(u'Pose estimation')
                humans = my_detector.detect(show)
                skelsInfo = SkeletonDetector.humans_to_skelsInfo(humans)
                print('SkelsInfo: ', skelsInfo)
                for ith_skel in range(len(skelsInfo)):
                    skeleton = SkeletonDetector.get_ith_skeleton(
                        skelsInfo, ith_skel)
                    if ith_skel == 0:
                        my_detector.draw(show, humans)

            elif self.__flag_mode == 2:
                self.infoBox.setText(u'Multi-person tracking')
                predicted_label = ''
                humans = my_detector.detect(show)
                skelsInfo = SkeletonDetector.humans_to_skelsInfo(humans)
                for ith_skel in range(len(skelsInfo)):
                    skeleton = SkeletonDetector.get_ith_skeleton(
                        skelsInfo, ith_skel)
                    if ith_skel == 0:
                        my_detector.draw(show, humans)
                        print('show.shape:', show.shape)
                    # Draw bounding box and (empty) label for each person
                    drawActionResult(show, skeleton, predicted_label)
                #show = np.array(show)

            elif self.__flag_mode == 3:
                self.infoBox.setText(u'Sign language recognition')
                humans = my_detector.detect(show)
                skelsInfo_choose = SkeletonDetector.humans_to_skelsInfo_choose(
                    humans, joint_choose)
                skelsInfo = SkeletonDetector.humans_to_skelsInfo(humans)
                # Reset the accumulated sentence when nobody is detected
                if len(skelsInfo) == 0:
                    self.text = ''
                for ith_skel in range(len(skelsInfo)):
                    skeleton = SkeletonDetector.get_ith_skeleton(
                        skelsInfo, ith_skel)
                    # Only the selected joints (joint_choose) are fed to the classifier
                    skeleton_choose = SkeletonDetector.get_ith_skeleton_choose(
                        skelsInfo_choose, ith_skel)
                    print('skeleton_choose:', skeleton_choose)
                    if len(skeleton_choose) == 20:  # expected number of selected-joint coordinates
                        predicted_label, acc = classifier.predict(
                            skeleton_choose)
                        # Reject low-confidence predictions
                        if acc < 0.95:
                            predicted_label = ''
                        # Append the label only when it changes (self.flag holds the previous label)
                        if predicted_label != self.flag:
                            self.text = self.text + ' ' + predicted_label
                        self.flag = predicted_label
                    else:
                        predicted_label = ''
                    my_detector.draw(show, humans)
                    drawActionResult(show, skeleton, predicted_label)
                    self.textBox.setText('ID-1: ' + self.text)

            end = time.time()
            self.fps = 1. / (end - start)
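            # Overlay the measured FPS on the frame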
            cv2.putText(show, 'FPS: %.2f' % self.fps, (30, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
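            # Wrap the RGB frame in a QImage and display it on the preview QLabel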
            showImage = QtGui.QImage(show, show.shape[1], show.shape[0],
                                     QtGui.QImage.Format_RGB888)
            self.label_show_camera.setPixmap(
                QtGui.QPixmap.fromImage(showImage))
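
        # NOTE (assumption, not shown in this excerpt): show_camera() is meant to run once
        # per frame; in PyQt this is typically wired up with a QTimer in the widget's
        # setup code (assuming QtCore is imported alongside QtGui), e.g.:
        #
        #   self.timer_camera = QtCore.QTimer(self)
        #   self.timer_camera.timeout.connect(self.show_camera)
        #   self.timer_camera.start(30)   # tick roughly every 30 ms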