# --- Extract pose keypoints from every frame of a video and dump them as JSON.
# Relies on names defined earlier in the file: logger, args, e (the
# TfPoseEstimator instance), w, h, and the cv2 / tqdm / json imports.
logger.debug('cam read+')
cam = cv2.VideoCapture('videos/' + args.video + '.mp4')

# Probe a single frame just to log the resolution; guard it so a missing or
# unreadable file fails loudly instead of crashing on image.shape of None.
ret_val, image = cam.read()
if not ret_val:
    raise RuntimeError('could not read first frame from videos/%s.mp4' % args.video)
logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))
logger.info('FPS: ' + str(cam.get(cv2.CAP_PROP_FPS)))

# Persist the source FPS alongside the video for later playback/sync.
with open('videos/' + args.video + '.fps', 'w+') as f:
    f.write(str(cam.get(cv2.CAP_PROP_FPS)))

n_frames = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))

# BUG FIX: the probe read above already consumed frame 0, so the original loop
# silently dropped the first frame's keypoints and its final read failed
# (len(all_cords) came out one short of the frame count). Rewind so every
# frame is processed exactly once.
cam.set(cv2.CAP_PROP_POS_FRAMES, 0)

all_cords = []  # one keypoints entry per successfully decoded frame
for _ in tqdm(range(n_frames)):
    ret_val, image = cam.read()
    if not ret_val:  # end of stream / decode failure
        break
    humans = e.inference(image, resize_to_default=(w > 0 and h > 0),
                         upsample_size=args.resize_out_ratio)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    all_cords.append(TfPoseEstimator.get_cords(image, humans, imgcopy=False))
    if cv2.waitKey(1) == 27:  # ESC aborts early
        break

cv2.destroyAllWindows()
print(len(all_cords))
print(cam.get(cv2.CAP_PROP_FRAME_COUNT))
with open('keypoints/' + args.video + '.keypoints.json', 'w+') as f:
    json.dump(all_cords, f)
# --- Live loop: capture frames for the duration of the choreography and keep
# per-frame keypoints, carrying briefly-missing body parts forward.
print('VIDEO PATH:', video_path)
print('KEYPOINTS PATH:', choreo_keypoints_path)

sec_start_time = time.time()
curr_cords = []                 # [timestamp, cords] per captured frame
last_cords = [time.time(), [{}]]
part_linger_max = 3             # frames a missing body part may be carried forward
part_linger = {}                # body-part key -> consecutive missing-frame count

# NOTE(review): the loop condition uses start_time (presumably set earlier in
# the file) while sec_start_time above is never read — confirm which clock is
# intended. Loop runs for the choreography's duration (frame count / fps).
while time.time() - start_time + 1 < len(choreo_json) / fps:
    ret_val, image = cam.read()
    if not ret_val:  # camera/stream failure — stop instead of inferring on None
        break
    humans = e.inference(image, resize_to_default=(w > 0 and h > 0),
                         upsample_size=4.)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    curr_cords.append([time.time(),
                       TfPoseEstimator.get_cords(image, humans, imgcopy=False)])
    print(last_cords, 'a')  # debug output

    # Ensure both frames have at least one (possibly empty) person dict so the
    # carry-forward below can index [1][0] safely.
    if len(last_cords[1]) == 0:
        last_cords[1].append({})
    for key in last_cords[1][0].keys():
        if len(curr_cords[-1][1]) == 0:
            curr_cords[-1][1].append({})
        if key not in curr_cords[-1][1][0]:
            part_linger[key] = part_linger.get(key, 0) + 1
            # BUG FIX: compare against part_linger_max instead of the magic
            # literal 3 the constant was introduced to replace.
            if part_linger[key] > part_linger_max:
                part_linger[key] = 0  # lingered too long — drop the part
            else:
                # Still within the linger window: reuse last frame's position.
                curr_cords[-1][1][0][key] = last_cords[1][0][key]
    last_cords = curr_cords[-1]