def play(self, is_train, video_index):
    # Load the saved skeleton dataset and fetch one video's results
    self.scd = PgdSkeleton(Path.home() / 'intentlong', is_train,
                           self.img_size)
    res = self.scd[video_index]
    coord_norm = res[PG.COORD_NORM]  # Normalized coordinates, shape: F,X,J
    coord_norm = np.transpose(coord_norm, (0, 2, 1))  # F,X,J -> F,J,X
    coord = coord_norm * np.array(self.img_size)  # Scale [0, 1] coords to pixels
    img_shape = self.img_size[::-1] + (3,)  # (W, H) -> (H, W, 3)
    kps = [
        KeypointsOnImage.from_xy_array(coord_JX, shape=img_shape)
        for coord_JX in coord
    ]
    cap = cv2.VideoCapture(str(res[PG.VIDEO_PATH]))
    v_size = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    v_fps = int(cap.get(cv2.CAP_PROP_FPS))
    duration = int(1000 / v_fps)  # Per-frame display delay in milliseconds
    for n in range(v_size):
        ret, img = cap.read()
        if not ret:  # Stop if the stream ends before the reported frame count
            break
        re_img = cv2.resize(img, self.img_size)
        img_kps = kps[n].draw_on_image(re_img)
        if self.is_unittest:
            break
        cv2.imshow("Play saved keypoint results", img_kps)
        cv2.waitKey(duration)
    cap.release()
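The keypoint-drawing step above uses only imgaug's KeypointsOnImage API and works independently of the PGD data. A minimal standalone sketch, with made-up joint coordinates and a blank canvas:

import numpy as np
from imgaug.augmentables.kps import KeypointsOnImage

# Hypothetical data: 3 joints in normalized [0, 1] coordinates for one frame
coord_norm_JX = np.array([[0.25, 0.5], [0.5, 0.3], [0.75, 0.5]])
img_size = (512, 512)                           # (width, height)
coord_JX = coord_norm_JX * np.array(img_size)   # Scale to pixel coordinates

canvas = np.zeros(img_size[::-1] + (3,), dtype=np.uint8)  # (H, W, 3) black image
kps = KeypointsOnImage.from_xy_array(coord_JX, shape=canvas.shape)
drawn = kps.draw_on_image(canvas, size=5)  # Returns a new image with the points drawn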
Example #2
def play_dataset_video(self, is_train, video_index, show=True):
    self.scd = PgdSkeleton(Path.home() / 'PoliceGestureLong', is_train, self.img_size)
    res = self.scd[video_index]
    print('Playing %s' % res[PG.VIDEO_NAME])
    coord_norm_FXJ = res[PG.COORD_NORM]  # Shape: F,X,J
    coord_norm_FJX = np.transpose(coord_norm_FXJ, (0, 2, 1))  # FJX
    coord = coord_norm_FJX * np.array(self.img_size)  # Normalized -> pixel coordinates
    img_shape = self.img_size[::-1] + (3,)
    kps = [KeypointsOnImage.from_xy_array(coord_JX, shape=img_shape)
           for coord_JX in coord]  # (frames, KeypointsOnImage)
    cap = cv2.VideoCapture(str(res[PG.VIDEO_PATH]))
    v_size = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    v_fps = int(cap.get(cv2.CAP_PROP_FPS))
    duration = int(1000 / (v_fps * 4))  # Quarter-length delay: play at 4x speed
    gestures = []  # Full-video gesture recognition results
    for n in range(v_size):
        # Predict the gesture for this frame from its skeleton
        gdict = self.gpred.from_skeleton(coord_norm_FXJ[n][np.newaxis])
        gesture = gdict[PG.OUT_ARGMAX]
        gestures.append(gesture)
        if not show:
            continue
        ret, img = cap.read()
        if not ret:  # Stop drawing if the stream ends early
            break
        re_img = cv2.resize(img, self.img_size)
        ges_name = self.gesture_dict[gesture]
        re_img = draw_text(re_img, 50, 100, ges_name, (255, 50, 50), size=40)
        img_kps = kps[n].draw_on_image(re_img)
        if self.is_unittest:
            break
        cv2.imshow("Play saved keypoint results", img_kps)
        cv2.waitKey(duration)
    cap.release()
    gestures = np.array(gestures, dtype=np.int64)  # np.int was removed in NumPy >= 1.24
    res[PG.PRED_GESTURES] = gestures
    print('The prediction of video %s is completed' % res[PG.VIDEO_NAME])
    return res
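Because the method attaches per-frame predictions to the returned dict, it can run headless (show=False) and be scored offline. A minimal sketch, assuming a `player` instance of the class above and a ground-truth key PG.GESTURE_LABEL (both hypothetical names, not confirmed by the source):

import numpy as np

res = player.play_dataset_video(is_train=False, video_index=0, show=False)
pred = res[PG.PRED_GESTURES]
truth = np.asarray(res[PG.GESTURE_LABEL])  # Hypothetical key; the real constant may differ
n = min(len(pred), len(truth))             # Guard against a length mismatch
acc = (pred[:n] == truth[:n]).mean()
print('Per-frame accuracy: %.2f%%' % (100 * acc))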
Example #3
from pathlib import Path

from torch.utils.data import DataLoader  # PgdSkeleton is project-local


def save():
    # Iterating the loader invokes PgdSkeleton.__getitem__ on every video, which
    # computes the skeleton results and, as the function name suggests, presumably
    # saves them to disk as a side effect; the samples themselves are discarded.
    ds = PgdSkeleton(Path.home() / 'PoliceGestureLong',
                     is_train=True,
                     resize_img_size=(512, 512))
    loader = DataLoader(ds,
                        batch_size=1,
                        shuffle=False,
                        num_workers=0,
                        collate_fn=lambda x: x)  # Identity collate: keep raw sample dicts
    for d in loader:
        pass
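The `collate_fn=lambda x: x` is the notable detail: it bypasses PyTorch's default collation, which would try to stack every field of the sample dicts into batched tensors (and fail on variable-length fields), and instead hands back the raw list of samples. A self-contained toy sketch of the same pattern, with a hypothetical ToyDataset standing in for PgdSkeleton:

from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):
    """Samples with variable-length fields that default collation cannot stack."""
    def __init__(self):
        self.data = [[1], [2, 3], [4, 5, 6]]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return {'seq': self.data[i], 'index': i}

# Identity collate: each batch is simply the list of sample dicts, untouched
loader = DataLoader(ToyDataset(), batch_size=2, collate_fn=lambda x: x)
for batch in loader:
    print(batch)  # First batch: [{'seq': [1], 'index': 0}, {'seq': [2, 3], 'index': 1}]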